Merge tag 'kvm-x86-pmu-6.4' of https://github.com/kvm-x86/linux into HEAD
author: Paolo Bonzini <pbonzini@redhat.com>
Wed, 26 Apr 2023 19:53:36 +0000 (15:53 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Wed, 26 Apr 2023 19:53:36 +0000 (15:53 -0400)
KVM x86 PMU changes for 6.4:

 - Disallow virtualizing legacy LBRs if architectural LBRs are available,
   the two are mutually exclusive in hardware

 - Disallow writes to immutable feature MSRs (notably PERF_CAPABILITIES)
   after KVM_RUN, and overhaul the vmx_pmu_caps selftest to better
   validate PERF_CAPABILITIES

 - Apply PMU filters to emulated events and add test coverage to the
   pmu_event_filter selftest

 - Misc cleanups and fixes

832 files changed:
.gitignore
.mailmap
Documentation/driver-api/vfio.rst
Documentation/filesystems/vfs.rst
Documentation/firmware-guide/acpi/enumeration.rst
Documentation/maintainer/rebasing-and-merging.rst
Documentation/mm/hugetlbfs_reserv.rst
Documentation/mm/physical_memory.rst
Documentation/netlink/genetlink-c.yaml
Documentation/netlink/genetlink-legacy.yaml
Documentation/netlink/genetlink.yaml
Documentation/netlink/specs/ethtool.yaml
Documentation/netlink/specs/fou.yaml
Documentation/netlink/specs/netdev.yaml
Documentation/networking/xdp-rx-metadata.rst
Documentation/process/programming-language.rst
Documentation/process/submitting-patches.rst
Documentation/scheduler/sched-capacity.rst
Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst
Documentation/translations/zh_CN/scheduler/sched-capacity.rst
Documentation/usb/gadget_uvc.rst [new file with mode: 0644]
Documentation/usb/index.rst
Documentation/userspace-api/netlink/specs.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/devices/vm.rst
Documentation/virt/kvm/locking.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/e60k02.dtsi
arch/arm/boot/dts/e70k02.dtsi
arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
arch/arm/lib/uaccess_with_memcpy.c
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-kbox-a-230-ls.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/freescale/imx93.dtsi
arch/arm64/boot/dts/nvidia/tegra194.dtsi
arch/arm64/boot/dts/nvidia/tegra234.dtsi
arch/arm64/boot/dts/qcom/msm8916-thwc-uf896.dts
arch/arm64/boot/dts/qcom/msm8916-thwc-ufi001c.dts
arch/arm64/boot/dts/qcom/msm8916-ufi.dtsi
arch/arm64/boot/dts/qcom/sa8540p-ride.dts
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
arch/arm64/boot/dts/qcom/sc8280xp.dtsi
arch/arm64/boot/dts/qcom/sm6115.dtsi
arch/arm64/boot/dts/qcom/sm6375.dtsi
arch/arm64/boot/dts/qcom/sm8150.dtsi
arch/arm64/boot/dts/qcom/sm8250-xiaomi-elish.dts
arch/arm64/boot/dts/qcom/sm8350.dtsi
arch/arm64/boot/dts/qcom/sm8450.dtsi
arch/arm64/boot/dts/qcom/sm8550.dtsi
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/efi-header.S
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/debug-sr.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/nvhe/timer-sr.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
arch/arm64/kvm/hypercalls.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/psci.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/trace_arm.h
arch/arm64/kvm/vgic/vgic-debug.c
arch/arm64/kvm/vgic/vgic-init.c
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-kvm-device.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/kvm/vgic/vgic-mmio.c
arch/arm64/kvm/vgic/vgic-v4.c
arch/arm64/kvm/vgic/vgic.c
arch/arm64/kvm/vgic/vgic.h
arch/arm64/tools/cpucaps
arch/arm64/tools/sysreg
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/vz.c
arch/powerpc/include/asm/kasan.h
arch/powerpc/include/asm/string.h
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/mm/fault.c
arch/powerpc/platforms/pseries/Kconfig
arch/riscv/Kconfig
arch/riscv/Makefile
arch/riscv/include/asm/mmu.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/mm/context.c
arch/riscv/mm/fault.c
arch/riscv/mm/tlbflush.c
arch/s390/boot/ipl_report.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/kvm/interrupt.c
arch/s390/kvm/pci.c
arch/s390/kvm/vsie.c
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.c
arch/s390/pci/pci_bus.h
arch/x86/events/amd/core.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/sev-common.h
arch/x86/include/asm/xen/cpuid.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/ctrlmondata.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/ftrace_64.S
arch/x86/kernel/sev.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/kvm_onhyperv.c
arch/x86/kvm/kvm_onhyperv.h
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/spte.c
arch/x86/kvm/mmu/tdp_iter.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/pmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm_onhyperv.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/cpu_entry_area.c
arch/x86/mm/mem_encrypt_identity.c
arch/x86/xen/Makefile
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/time.c
arch/x86/xen/vga.c
arch/x86/xen/xen-ops.h
block/Kconfig
block/blk-core.c
block/blk-mq.c
block/blk-mq.h
crypto/asymmetric_keys/pkcs7_verify.c
crypto/asymmetric_keys/verify_pefile.c
drivers/accel/Makefile
drivers/acpi/pptt.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_thermal.c
drivers/acpi/resource.c
drivers/acpi/video_detect.c
drivers/acpi/x86/utils.c
drivers/ata/pata_parport/pata_parport.c
drivers/atm/idt77252.c
drivers/block/loop.c
drivers/block/null_blk/main.c
drivers/block/sunvdc.c
drivers/block/ublk_drv.c
drivers/bluetooth/btintel.c
drivers/bluetooth/btintel.h
drivers/bluetooth/btqcomsmd.c
drivers/bluetooth/btsdio.c
drivers/bluetooth/btusb.c
drivers/bus/imx-weim.c
drivers/clk/Kconfig
drivers/clk/bcm/clk-bcm2835-aux.c
drivers/clk/bcm/clk-bcm2835.c
drivers/clk/clk-fixed-mmio.c
drivers/clk/clk-fsl-sai.c
drivers/clk/clk-k210.c
drivers/clk/hisilicon/clk-hi3559a.c
drivers/clk/microchip/clk-mpfs-ccc.c
drivers/cpuidle/cpuidle-psci-domain.c
drivers/firmware/arm_scmi/bus.c
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/mailbox.c
drivers/firmware/efi/earlycon.c
drivers/firmware/efi/efi-init.c
drivers/firmware/efi/libstub/Makefile.zboot
drivers/firmware/efi/libstub/arm64-stub.c
drivers/firmware/efi/libstub/arm64.c
drivers/firmware/efi/libstub/efi-stub-entry.c
drivers/firmware/efi/libstub/efi-stub.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/randomalloc.c
drivers/firmware/efi/libstub/screen_info.c
drivers/firmware/efi/libstub/smbios.c
drivers/firmware/efi/libstub/zboot-header.S
drivers/firmware/efi/libstub/zboot.c
drivers/firmware/efi/sysfb_efi.c
drivers/firmware/qcom_scm.c
drivers/firmware/sysfb.c
drivers/firmware/sysfb_simplefb.c
drivers/firmware/xilinx/zynqmp.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/link/link_detection.c
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/bridge/lontium-lt8912b.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/i915/display/intel_crtc.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_snps_phy.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
drivers/gpu/drm/i915/gt/intel_rc6.c
drivers/gpu/drm/i915/gt/intel_sseu.h
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/meson/meson_vpp.c
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/host1x/dev.c
drivers/hwmon/adt7475.c
drivers/hwmon/hwmon.c
drivers/hwmon/ina3221.c
drivers/hwmon/it87.c
drivers/hwmon/ltc2992.c
drivers/hwmon/peci/cputemp.c
drivers/hwmon/pmbus/adm1266.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/tmp513.c
drivers/hwmon/xgene-hwmon.c
drivers/i2c/busses/i2c-hisi.c
drivers/i2c/busses/i2c-imx-lpi2c.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-xgene-slimpro.c
drivers/interconnect/core.c
drivers/interconnect/imx/imx.c
drivers/interconnect/qcom/icc-rpm.c
drivers/interconnect/qcom/icc-rpmh.c
drivers/interconnect/qcom/msm8974.c
drivers/interconnect/qcom/osm-l3.c
drivers/interconnect/qcom/qcm2290.c
drivers/interconnect/qcom/sm8450.c
drivers/interconnect/qcom/sm8550.c
drivers/interconnect/samsung/exynos.c
drivers/md/Kconfig
drivers/md/dm-crypt.c
drivers/md/dm-stats.c
drivers/md/dm-stats.h
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/md.c
drivers/media/i2c/m5mols/m5mols_core.c
drivers/memory/tegra/mc.c
drivers/memory/tegra/tegra124-emc.c
drivers/memory/tegra/tegra20-emc.c
drivers/memory/tegra/tegra30-emc.c
drivers/mmc/host/dw_mmc-starfive.c
drivers/mmc/host/sdhci_am654.c
drivers/net/bonding/bond_main.c
drivers/net/can/cc770/cc770_platform.c
drivers/net/dsa/b53/b53_mmap.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/google/gve/gve_ethtool.c
drivers/net/ethernet/i825xx/sni_82596.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/rgmii.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/iavf/iavf_common.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igbvf/vf.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mediatek/mtk_sgmii.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mscc/ocelot_stats.c
drivers/net/ethernet/natsemi/sonic.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/renesas/rswitch.h
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/ldmvsw.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/am65-cpts.c
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/toshiba/ps3_gelic_net.c
drivers/net/ethernet/toshiba/ps3_gelic_net.h
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/via/via-velocity.h
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xircom/xirc2ps_cs.c
drivers/net/ipa/gsi_reg.c
drivers/net/ipa/gsi_reg.h
drivers/net/ipa/ipa_reg.c
drivers/net/ipa/ipa_reg.h
drivers/net/ipa/reg.h
drivers/net/ipa/reg/gsi_reg-v4.5.c
drivers/net/ipa/reg/gsi_reg-v4.9.c
drivers/net/ipvlan/ipvlan_l3s.c
drivers/net/mdio/acpi_mdio.c
drivers/net/mdio/mdio-thunder.c
drivers/net/mdio/of_mdio.c
drivers/net/phy/mdio_devres.c
drivers/net/phy/mscc/mscc_main.c
drivers/net/phy/nxp-c45-tja11xx.c
drivers/net/phy/phy.c
drivers/net/phy/sfp.c
drivers/net/phy/smsc.c
drivers/net/usb/asix_devices.c
drivers/net/usb/lan78xx.c
drivers/net/usb/plusb.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/wan/fsl_ucc_hdlc.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/nfc/pn533/usb.c
drivers/nfc/st-nci/ndlc.c
drivers/nvme/host/core.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/multipath.c
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/nvme/target/core.c
drivers/nvmem/core.c
drivers/pci/bus.c
drivers/platform/chrome/cros_ec_chardev.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq24190_charger.c
drivers/power/supply/cros_usbpd-charger.c
drivers/power/supply/da9150-charger.c
drivers/power/supply/rk817_charger.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hosts.c
drivers/scsi/mpi3mr/mpi3mr.h
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/mpi3mr/mpi3mr_transport.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_scan.c
drivers/soc/qcom/llcc-qcom.c
drivers/soc/qcom/rmtfs_mem.c
drivers/tee/amdtee/core.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_core.h
drivers/thermal/thermal_sysfs.c
drivers/thunderbolt/debugfs.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/nhi_regs.h
drivers/thunderbolt/quirks.c
drivers/thunderbolt/retimer.c
drivers/thunderbolt/sb_regs.h
drivers/thunderbolt/switch.c
drivers/thunderbolt/tb.h
drivers/thunderbolt/usb4.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/serdev/core.c
drivers/tty/serial/8250/8250_em.c
drivers/tty/serial/8250/8250_fsl.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/Kconfig
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/vt/vt.c
drivers/ufs/core/ufshcd.c
drivers/usb/cdns3/cdns3-pci-wrap.c
drivers/usb/cdns3/cdnsp-ep0.c
drivers/usb/cdns3/cdnsp-pci.c
drivers/usb/chipidea/ci.h
drivers/usb/chipidea/core.c
drivers/usb/chipidea/otg.c
drivers/usb/dwc2/drd.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/misc/onboard_usb_hub.c
drivers/usb/misc/onboard_usb_hub.h
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vfio/pci/mlx5/main.c
drivers/vhost/vdpa.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/au1200fb.c
drivers/video/fbdev/bw2.c
drivers/video/fbdev/cg3.c
drivers/video/fbdev/chipsfb.c
drivers/video/fbdev/clps711x-fb.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/geode/lxfb_core.c
drivers/video/fbdev/intelfb/intelfbdrv.c
drivers/video/fbdev/nvidia/nvidia.c
drivers/video/fbdev/offb.c
drivers/video/fbdev/omap/Makefile
drivers/video/fbdev/omap/lcd_osk.c [deleted file]
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
drivers/video/fbdev/pxa3xx-gcu.c
drivers/video/fbdev/sm501fb.c
drivers/video/fbdev/stifb.c
drivers/video/fbdev/tcx.c
drivers/video/fbdev/tgafb.c
drivers/video/fbdev/wm8505fb.c
drivers/video/fbdev/xilinxfb.c
drivers/video/logo/pnmtologo.c
drivers/virt/coco/sev-guest/sev-guest.c
drivers/xen/xenfs/xensyms.c
fs/btrfs/block-group.c
fs/btrfs/free-space-cache.c
fs/btrfs/fs.h
fs/btrfs/inode.c
fs/btrfs/space-info.c
fs/btrfs/space-info.h
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/cifs/cached_dir.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dfs.c
fs/cifs/dfs.h
fs/cifs/dfs_cache.c
fs/cifs/dfs_cache.h
fs/cifs/file.c
fs/cifs/fs_context.h
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/smb2inode.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2transport.c
fs/cifs/trace.h
fs/cifs/transport.c
fs/crypto/keyring.c
fs/ext4/namei.c
fs/gfs2/dentry.c
fs/ksmbd/auth.c
fs/ksmbd/connection.c
fs/ksmbd/connection.h
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb_common.c
fs/ksmbd/smb_common.h
fs/ksmbd/transport_rdma.c
fs/ksmbd/transport_tcp.c
fs/lockd/clnt4xdr.c
fs/lockd/xdr4.c
fs/nfs/Kconfig
fs/nfs/dir.c
fs/nfs/read.c
fs/nfsd/Kconfig
fs/nfsd/vfs.c
fs/nilfs2/ioctl.c
fs/ocfs2/aops.c
fs/super.c
fs/verity/enable.c
fs/verity/verify.c
fs/xfs/Makefile
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/xfs_dahash_test.c [new file with mode: 0644]
fs/xfs/xfs_dahash_test.h [new file with mode: 0644]
fs/xfs/xfs_iomap.c
fs/xfs/xfs_super.c
fs/xfs/xfs_trace.h
fs/zonefs/file.c
include/acpi/acpi_bus.h
include/clocksource/arm_arch_timer.h
include/drm/drm_bridge.h
include/drm/drm_gem.h
include/kvm/arm_arch_timer.h
include/kvm/arm_hypercalls.h
include/kvm/arm_vgic.h
include/linux/acpi_mdio.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/clk-provider.h
include/linux/context_tracking.h
include/linux/context_tracking_state.h
include/linux/cpumask.h
include/linux/efi.h
include/linux/fb.h
include/linux/find.h
include/linux/interconnect-provider.h
include/linux/io_uring.h
include/linux/kvm_host.h
include/linux/kvm_types.h
include/linux/lockd/xdr4.h
include/linux/netdevice.h
include/linux/nvme-tcp.h
include/linux/nvme.h
include/linux/of_mdio.h
include/linux/pci.h
include/linux/percpu_counter.h
include/linux/stmmac.h
include/linux/sysfb.h
include/linux/thermal.h
include/linux/tracepoint.h
include/net/bluetooth/hci_core.h
include/net/xdp.h
include/scsi/scsi_device.h
include/scsi/scsi_devinfo.h
include/trace/events/mmap.h
include/uapi/linux/fou.h
include/uapi/linux/kvm.h
include/uapi/linux/netdev.h
include/uapi/linux/rtnetlink.h
include/xen/interface/platform.h
init/main.c
io_uring/filetable.c
io_uring/msg_ring.c
io_uring/net.c
io_uring/rsrc.c
io_uring/sqpoll.c
io_uring/uring_cmd.c
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/compat.c
kernel/entry/common.c
kernel/events/core.c
kernel/fork.c
kernel/kcsan/Makefile
kernel/sched/core.c
kernel/sched/fair.c
kernel/trace/ftrace.c
kernel/trace/kprobe_event_gen_test.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_osnoise.c
lib/dhry_run.c
lib/find_bit.c
lib/maple_tree.c
lib/percpu_counter.c
lib/test_maple_tree.c
lib/zstd/common/zstd_deps.h
lib/zstd/decompress/huf_decompress.c
lib/zstd/decompress/zstd_decompress.c
mm/damon/paddr.c
mm/huge_memory.c
mm/kfence/Makefile
mm/kfence/core.c
mm/ksm.c
mm/migrate.c
mm/mincore.c
mm/mmap.c
mm/mprotect.c
mm/page_alloc.c
mm/slab.c
mm/vmalloc.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sync.c
net/bluetooth/iso.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/core/netdev-genl-gen.c
net/core/netdev-genl-gen.h
net/core/xdp.c
net/dsa/slave.c
net/dsa/tag.c
net/dsa/tag_brcm.c
net/hsr/hsr_framereg.c
net/ipv4/fib_frontend.c
net/ipv4/fou_nl.c
net/ipv4/fou_nl.h
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/tcp_output.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/iucv/iucv.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/main.c
net/mac80211/rx.c
net/mac80211/util.c
net/mac80211/wme.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/ncsi/ncsi-manage.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_redir.c
net/sched/act_api.c
net/smc/af_smc.c
net/smc/smc_cdc.c
net/smc/smc_core.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/nl80211.c
net/xdp/xdp_umem.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/.gitignore
scripts/Makefile
scripts/Makefile.package
scripts/check-git [new file with mode: 0755]
scripts/checksyscalls.sh
scripts/kallsyms.c
scripts/kconfig/confdata.c
scripts/list-gitignored.c [deleted file]
scripts/package/builddeb
scripts/package/deb-build-option
scripts/package/gen-diff-patch [new file with mode: 0755]
scripts/package/mkdebian
scripts/package/mkspec
scripts/setlocalversion
security/keys/request_key.c
sound/hda/intel-dsp-config.c
sound/pci/asihpi/hpi6205.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/da7219-aad.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/fsl/Kconfig
sound/soc/intel/avs/boards/da7219.c
sound/soc/intel/avs/boards/max98357a.c
sound/soc/intel/avs/boards/nau8825.c
sound/soc/intel/avs/boards/rt5682.c
sound/soc/intel/avs/boards/ssm4567.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/qcom/qdsp6/q6prm.c
sound/soc/sof/intel/hda-ctrl.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/pci-apl.c
sound/soc/sof/intel/pci-cnl.c
sound/soc/sof/intel/pci-icl.c
sound/soc/sof/intel/pci-mtl.c
sound/soc/sof/intel/pci-skl.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/sof/intel/pci-tng.c
sound/soc/sof/ipc3-topology.c
sound/soc/sof/ipc3.c
sound/soc/sof/ipc4-control.c
sound/soc/sof/ipc4-topology.c
sound/soc/sof/ipc4-topology.h
sound/soc/sof/sof-audio.c
sound/soc/sof/topology.c
tools/bootconfig/test-bootconfig.sh
tools/include/uapi/linux/netdev.h
tools/lib/bpf/libbpf_internal.h
tools/net/ynl/lib/nlspec.py
tools/net/ynl/lib/ynl.py
tools/net/ynl/ynl-gen-c.py
tools/power/acpi/tools/pfrut/pfrut.c
tools/power/pm-graph/sleepgraph.py
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/amd-pstate/Makefile
tools/testing/selftests/bpf/prog_tests/uninit_stack.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/find_vma_fail1.c
tools/testing/selftests/bpf/progs/test_deny_namespace.c
tools/testing/selftests/bpf/progs/test_global_func10.c
tools/testing/selftests/bpf/progs/uninit_stack.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/bpf/verifier/helper_access_var_len.c
tools/testing/selftests/bpf/verifier/int_ptr.c
tools/testing/selftests/bpf/verifier/search_pruning.c
tools/testing/selftests/bpf/verifier/sock.c
tools/testing/selftests/bpf/verifier/spill_fill.c
tools/testing/selftests/bpf/verifier/var_off.c
tools/testing/selftests/drivers/net/bonding/Makefile
tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh [new file with mode: 0755]
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/arch_timer.c
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/aarch64/smccc_filter.c [new file with mode: 0644]
tools/testing/selftests/kvm/config
tools/testing/selftests/kvm/include/aarch64/processor.h
tools/testing/selftests/kvm/lib/aarch64/processor.c
tools/testing/selftests/lib.mk
tools/testing/selftests/mm/mdwe_test.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/bind_wildcard.c [new file with mode: 0644]
tools/testing/selftests/net/devlink_port_split.py
tools/testing/selftests/net/mptcp/userspace_pm.sh
tools/testing/selftests/x86/amx.c
tools/testing/vsock/vsock_test.c
tools/virtio/.gitignore
virt/kvm/kvm_main.c

index 8fe465f251c0368f7d66ba687aa4c5cbbfaf1b76..70ec6037fa7ac0b563d9790eefd0af168c1ad0ba 100644 (file)
@@ -78,6 +78,7 @@ modules.order
 # RPM spec file (make rpm-pkg)
 #
 /*.spec
+/rpmbuild/
 
 #
 # Debian directory (make deb-pkg)
index 424564f40733ade9bbf057a3840b45e4418e3ae5..e2af78f67f7cc20ef0170f4514885cd5b0da0a74 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -28,6 +28,7 @@ Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
 Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
 Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
+Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
@@ -121,7 +122,7 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
 <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
-Dikshita Agarwal <dikshita@qti.qualcomm.com> <dikshita@codeaurora.org>
+Dikshita Agarwal <quic_dikshita@quicinc.com> <dikshita@codeaurora.org>
 Dmitry Baryshkov <dbaryshkov@gmail.com>
 Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
 Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
@@ -132,6 +133,8 @@ Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
+Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
+Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
 Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
 Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
@@ -194,6 +197,7 @@ Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
 Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
+Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@tuni.fi>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
@@ -213,6 +217,9 @@ Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
 Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
+Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com>
+Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com>
+Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com>
 Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
 Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
@@ -374,6 +381,7 @@ Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
+Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
 Rajesh Shah <rajesh.shah@intel.com>
 Ralf Baechle <ralf@linux-mips.org>
 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
@@ -382,6 +390,9 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Richard Leitner <richard.leitner@linux.dev> <dev@g0hl1n.net>
+Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
+Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
 Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
@@ -392,6 +403,7 @@ Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
+Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
 Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
 Sam Ravnborg <sam@mars.ravnborg.org>
 Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
@@ -432,6 +444,10 @@ Thomas Graf <tgraf@suug.ch>
 Thomas Körper <socketcan@esd.eu> <thomas.koerper@esd.eu>
 Thomas Pedersen <twp@codeaurora.org>
 Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
+Tobias Klauser <tklauser@distanz.ch> <tobias.klauser@gmail.com>
+Tobias Klauser <tklauser@distanz.ch> <klto@zhaw.ch>
+Tobias Klauser <tklauser@distanz.ch> <tklauser@nuerscht.ch>
+Tobias Klauser <tklauser@distanz.ch> <tklauser@xenon.tklauser.home>
 Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
 Tony Luck <tony.luck@intel.com>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
index 50b690f7f66392d5131a2410717caa0cc44cd53e..68abc089d6ddd173013b115ff65df1e84b292706 100644 (file)
@@ -242,7 +242,7 @@ group and can access them as follows::
 VFIO User API
 -------------------------------------------------------------------------------
 
-Please see include/linux/vfio.h for complete API documentation.
+Please see include/uapi/linux/vfio.h for complete API documentation.
 
 VFIO bus driver API
 -------------------------------------------------------------------------------
index c53f30251a660c8b143e32bac800420fb9afe057..f3b344f0c0a4bdb0b90fbdc1380060ed0813997d 100644 (file)
@@ -1222,7 +1222,7 @@ defined:
        return
        -ECHILD and it will be called again in ref-walk mode.
 
-``_weak_revalidate``
+``d_weak_revalidate``
        called when the VFS needs to revalidate a "jumped" dentry.  This
        is called when a path-walk ends at dentry that was not acquired
        by doing a lookup in the parent directory.  This includes "/",
index b9dc0c603f3672789a462945fad841d95d5b8307..56d9913a3370c47b2c10ebe0f8df58c6c6b4c674 100644 (file)
@@ -19,7 +19,7 @@ possible we decided to do following:
     platform devices.
 
   - Devices behind real busses where there is a connector resource
-    are represented as struct spi_device or struct i2c_device. Note
+    are represented as struct spi_device or struct i2c_client. Note
     that standard UARTs are not busses so there is no struct uart_device,
     although some of them may be represented by struct serdev_device.
 
index 09f988e7fa719e62a598e1846f112db002a851eb..85800ce95ae5fccc4452626a1fada16d8961b84a 100644 (file)
@@ -213,11 +213,7 @@ point rather than some random spot.  If your upstream-bound branch has
 emptied entirely into the mainline during the merge window, you can pull it
 forward with a command like::
 
-  git merge v5.2-rc1^0
-
-The "^0" will cause Git to do a fast-forward merge (which should be
-possible in this situation), thus avoiding the addition of a spurious merge
-commit.
+  git merge --ff-only v5.2-rc1
 
 The guidelines laid out above are just that: guidelines.  There will always
 be situations that call out for a different solution, and these guidelines
index 3d05d64de9b463ac1caa68c5d1d46487ab9f75bc..d9c2b0f01dcd0fc1a71c029c98ecdbbd9c790311 100644 (file)
@@ -5,10 +5,10 @@ Hugetlbfs Reservation
 Overview
 ========
 
-Huge pages as described at Documentation/mm/hugetlbpage.rst are typically
-preallocated for application use.  These huge pages are instantiated in a
-task's address space at page fault time if the VMA indicates huge pages are
-to be used.  If no huge page exists at page fault time, the task is sent
+Huge pages as described at Documentation/admin-guide/mm/hugetlbpage.rst are
+typically preallocated for application use.  These huge pages are instantiated
+in a task's address space at page fault time if the VMA indicates huge pages
+are to be used.  If no huge page exists at page fault time, the task is sent
 a SIGBUS and often dies an unhappy death.  Shortly after huge page support
 was added, it was determined that it would be better to detect a shortage
 of huge pages at mmap() time.  The idea is that if there were not enough
index f9d7ea4b9dca7fd6b81fba0d653186e88d0c607b..1bc888d36ea134c2615534bbd3d1238b173f3c93 100644 (file)
@@ -66,7 +66,7 @@ one of the types described below.
   also populated on boot using one of ``kernelcore``, ``movablecore`` and
   ``movable_node`` kernel command line parameters. See
   Documentation/mm/page_migration.rst and
-  Documentation/admin-guide/mm/memory_hotplug.rst for additional details.
+  Documentation/admin-guide/mm/memory-hotplug.rst for additional details.
 
 * ``ZONE_DEVICE`` represents memory residing on devices such as PMEM and GPU.
   It has different characteristics than RAM zone types and it exists to provide
index f082a5ad7cf1d47d127086111de8f6b1a8ac5acc..5c3642b3f802df5363d453350f421a37cc23ad25 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 %YAML 1.2
 ---
 $id: http://kernel.org/schemas/netlink/genetlink-c.yaml#
index c6b8c77f7d12e82f1ccd4f106a9ee2df34e5f5ff..5e98c6d2b9aae89e9a6b8f0111552e65709ab7ef 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 %YAML 1.2
 ---
 $id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#
index b2d56ab9e615d3262c1a2b757c465f1d5b41b007..d35dcd6f8d82a784414e3521945cff3b42644a49 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 %YAML 1.2
 ---
 $id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml#
index 18ecb7d90cbe5f3baf464987be6eb09d3f8ae948..4727c067e2ba361ef1271a5afaac0307b10e744f 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 
 name: ethtool
 
index cff104288723bb9dfb723c0a47a038b10b9db6d6..3e13826a3fdf18c746c59ae35f9c48af434488de 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 
 name: fou
 
index 24de747b53443158fc8adab3816219dcd3e82ec7..b99e7ffef7a1578f837e96047ddd0e4967adbed8 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 
 name: netdev
 
@@ -9,6 +9,7 @@ definitions:
   -
     type: flags
     name: xdp-act
+    render-max: true
     entries:
       -
         name: basic
index aac63fc2d08bd104634315d422d33f24717de4aa..25ce72af81c216322edfe56da3195fdd13a47db8 100644 (file)
@@ -23,10 +23,13 @@ metadata is supported, this set will grow:
 An XDP program can use these kfuncs to read the metadata into stack
 variables for its own consumption. Or, to pass the metadata on to other
 consumers, an XDP program can store it into the metadata area carried
-ahead of the packet.
+ahead of the packet. Not all packets will necessarily have the requested
+metadata available, in which case the driver returns ``-ENODATA``.
 
 Not all kfuncs have to be implemented by the device driver; when not
-implemented, the default ones that return ``-EOPNOTSUPP`` will be used.
+implemented, the default ones that return ``-EOPNOTSUPP`` will be used
+to indicate that the device driver has not implemented this kfunc.
+
 
 Within an XDP frame, the metadata layout (accessed via ``xdp_buff``) is
 as follows::
index 5fc9160ca1fa5b3054887d59e7336941d51d3a5e..bc56dee6d0bcb1c5ad558d88a97ae0a9ede718ad 100644 (file)
@@ -12,10 +12,6 @@ under ``-std=gnu11`` [gcc-c-dialect-options]_: the GNU dialect of ISO C11.
 This dialect contains many extensions to the language [gnu-extensions]_,
 and many of them are used within the kernel as a matter of course.
 
-There is some support for compiling the kernel with ``icc`` [icc]_ for several
-of the architectures, although at the time of writing it is not completed,
-requiring third-party patches.
-
 Attributes
 ----------
 
@@ -35,12 +31,28 @@ in order to feature detect which ones can be used and/or to shorten the code.
 
 Please refer to ``include/linux/compiler_attributes.h`` for more information.
 
+Rust
+----
+
+The kernel has experimental support for the Rust programming language
+[rust-language]_ under ``CONFIG_RUST``. It is compiled with ``rustc`` [rustc]_
+under ``--edition=2021`` [rust-editions]_. Editions are a way to introduce
+small changes to the language that are not backwards compatible.
+
+On top of that, some unstable features [rust-unstable-features]_ are used in
+the kernel. Unstable features may change in the future, thus it is an important
+goal to reach a point where only stable features are used.
+
+Please refer to Documentation/rust/index.rst for more information.
+
 .. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
 .. [gcc] https://gcc.gnu.org
 .. [clang] https://clang.llvm.org
-.. [icc] https://software.intel.com/en-us/c-compilers
 .. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
 .. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
 .. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
 .. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf
-
+.. [rust-language] https://www.rust-lang.org
+.. [rustc] https://doc.rust-lang.org/rustc/
+.. [rust-editions] https://doc.rust-lang.org/edition-guide/editions/
+.. [rust-unstable-features] https://github.com/Rust-for-Linux/linux/issues/2
index eac7167dce83d0fd7665a874aceaa4c9fa5d9e83..69ce64e03c70f8e5ff39c951d28f3597f40a3bdd 100644 (file)
@@ -320,7 +320,7 @@ for their time.  Code review is a tiring and time-consuming process, and
 reviewers sometimes get grumpy.  Even in that case, though, respond
 politely and address the problems they have pointed out.  When sending a next
 version, add a ``patch changelog`` to the cover letter or to individual patches
-explaining difference aganst previous submission (see
+explaining difference against previous submission (see
 :ref:`the_canonical_patch_format`).
 
 See Documentation/process/email-clients.rst for recommendations on email
index 8e2b8538bc2b7414d99a0f1726cec6af89fe1065..e2c1cf7431588e6bba3bf5b6fd489c85f652d7ec 100644 (file)
@@ -258,7 +258,7 @@ Linux cannot currently figure out CPU capacity on its own, this information thus
 needs to be handed to it. Architectures must define arch_scale_cpu_capacity()
 for that purpose.
 
-The arm and arm64 architectures directly map this to the arch_topology driver
+The arm, arm64, and RISC-V architectures directly map this to the arch_topology driver
 CPU scaling data, which is derived from the capacity-dmips-mhz CPU binding; see
 Documentation/devicetree/bindings/cpu/cpu-capacity.txt.
 
index c1fa35315d8b2238892d1ff7661873a532088388..b7a0544224ad1aa70213b37c7c68aece4f8dfb5f 100644 (file)
@@ -15,7 +15,8 @@ Hugetlbfs 预留
 概述
 ====
 
-Documentation/mm/hugetlbpage.rst 中描述的巨页通常是预先分配给应用程序使用的。如果VMA指
+Documentation/admin-guide/mm/hugetlbpage.rst
+中描述的巨页通常是预先分配给应用程序使用的。如果VMA指
 示要使用巨页,这些巨页会在缺页异常时被实例化到任务的地址空间。如果在缺页异常
 时没有巨页存在,任务就会被发送一个SIGBUS,并经常不高兴地死去。在加入巨页支
 持后不久,人们决定,在mmap()时检测巨页的短缺情况会更好。这个想法是,如果
index e07ffdd391d32d97b0dda5c37b32da13b0d682c7..8cba135dcd1a6b4aa718f8cc215d8a84ed33f919 100644 (file)
@@ -231,7 +231,7 @@ CFS调度类基于实体负载跟踪机制(Per-Entity Load Tracking, PELT)
 当前,Linux无法凭自身算出CPU算力,因此必须要有把这个信息传递给Linux的方式。每个架构必须为此
 定义arch_scale_cpu_capacity()函数。
 
-arm和arm64架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中(译注:参考
+arm、arm64和RISC-V架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中(译注:参考
 arch_topology.h的percpu变量cpu_scale),它是从capacity-dmips-mhz CPU binding中衍生计算
 出来的。参见Documentation/devicetree/bindings/cpu/cpu-capacity.txt。
 
diff --git a/Documentation/usb/gadget_uvc.rst b/Documentation/usb/gadget_uvc.rst
new file mode 100644 (file)
index 0000000..6d22fac
--- /dev/null
@@ -0,0 +1,352 @@
+=======================
+Linux UVC Gadget Driver
+=======================
+
+Overview
+--------
+The UVC Gadget driver is a driver for hardware on the *device* side of a USB
+connection. It is intended to run on a Linux system that has USB device-side
+hardware such as boards with an OTG port.
+
+On the device system, once the driver is bound it appears as a V4L2 device with
+the output capability.
+
+On the host side (once connected via USB cable), a device running the UVC Gadget
+driver *and controlled by an appropriate userspace program* should appear as a UVC
+specification compliant camera, and function appropriately with any program
+designed to handle them. The userspace program running on the device system can
+queue image buffers from a variety of sources to be transmitted via the USB
+connection. Typically this would mean forwarding the buffers from a camera sensor
+peripheral, but the source of the buffer is entirely dependent on the userspace
+companion program.
+
+Configuring the device kernel
+-----------------------------
+The Kconfig options USB_CONFIGFS, USB_LIBCOMPOSITE, USB_CONFIGFS_F_UVC and
+USB_F_UVC must be selected to enable support for the UVC gadget.
+
+Configuring the gadget through configfs
+---------------------------------------
+The UVC Gadget expects to be configured through configfs using the UVC function.
+This allows a significant degree of flexibility, as many of a UVC device's
+settings can be controlled this way.
+
+Not all of the available attributes are described here. For a complete enumeration
+see Documentation/ABI/testing/configfs-usb-gadget-uvc
+
+Assumptions
+~~~~~~~~~~~
+This section assumes that you have mounted configfs at `/sys/kernel/config` and
+created a gadget as `/sys/kernel/config/usb_gadget/g1`.
+
+The UVC Function
+~~~~~~~~~~~~~~~~
+
+The first step is to create the UVC function:
+
+.. code-block:: bash
+
+       # These variables will be assumed throughout the rest of the document
+       CONFIGFS="/sys/kernel/config"
+       GADGET="$CONFIGFS/usb_gadget/g1"
+       FUNCTION="$GADGET/functions/uvc.0"
+
+       mkdir -p $FUNCTION
+
+Formats and Frames
+~~~~~~~~~~~~~~~~~~
+
+You must configure the gadget by telling it which formats you support, as well
+as the frame sizes and frame intervals that are supported for each format. In
+the current implementation there is no way for the gadget to refuse to set a
+format that the host instructs it to set, so it is important that this step is
+completed *accurately* to ensure that the host never asks for a format that
+can't be provided.
+
+Formats are created under the streaming/uncompressed and streaming/mjpeg configfs
+groups, with the framesizes created under the formats in the following
+structure:
+
+::
+
+       uvc.0 +
+             |
+             + streaming +
+                         |
+                         + mjpeg +
+                         |       |
+                         |       + mjpeg +
+                         |            |
+                         |            + 720p
+                         |            |
+                         |            + 1080p
+                         |
+                         + uncompressed +
+                                        |
+                                        + yuyv +
+                                               |
+                                               + 720p
+                                               |
+                                               + 1080p
+
+Each frame can then be configured with a width and height, plus the maximum
+buffer size required to store a single frame, and finally with the supported
+frame intervals for that format and framesize. Width and height are enumerated in
+units of pixels, frame interval in units of 100ns. To create the structure
+above with 2, 15 and 100 fps frameintervals for each framesize for example you
+might do:
+
+.. code-block:: bash
+
+       create_frame() {
+               # Example usage:
+               # create_frame <width> <height> <group> <format name>
+
+               WIDTH=$1
+               HEIGHT=$2
+               FORMAT=$3
+               NAME=$4
+
+               wdir=$FUNCTION/streaming/$FORMAT/$NAME/${HEIGHT}p
+
+               mkdir -p $wdir
+               echo $WIDTH > $wdir/wWidth
+               echo $HEIGHT > $wdir/wHeight
+               echo $(( $WIDTH * $HEIGHT * 2 )) > $wdir/dwMaxVideoFrameBufferSize
+               cat <<EOF > $wdir/dwFrameInterval
+       666666
+       100000
+       5000000
+       EOF
+       }
+
+       create_frame 1280 720 mjpeg mjpeg
+       create_frame 1920 1080 mjpeg mjpeg
+       create_frame 1280 720 uncompressed yuyv
+       create_frame 1920 1080 uncompressed yuyv
+
+The only uncompressed format currently supported is YUYV, which is detailed at
+Documentation/userspace-api/media/v4l/pixfmt-packed.yuv.rst.
+
+Color Matching Descriptors
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+It's possible to specify some colorimetry information for each format you create.
+This step is optional, and default information will be included if this step is
+skipped; those default values follow those defined in the Color Matching Descriptor
+section of the UVC specification.
+
+To create a Color Matching Descriptor, create a configfs item and set its three
+attributes to your desired settings and then link to it from the format you wish
+it to be associated with:
+
+.. code-block:: bash
+
+       # Create a new Color Matching Descriptor
+
+       mkdir $FUNCTION/streaming/color_matching/yuyv
+       pushd $FUNCTION/streaming/color_matching/yuyv
+
+       echo 1 > bColorPrimaries
+       echo 1 > bTransferCharacteristics
+       echo 4 > bMatrixCoefficients
+
+       popd
+
+       # Create a symlink to the Color Matching Descriptor from the format's config item
+       ln -s $FUNCTION/streaming/color_matching/yuyv $FUNCTION/streaming/uncompressed/yuyv
+
+For details about the valid values, consult the UVC specification. Note that a
+default color matching descriptor exists and is used by any format which does
+not have a link to a different Color Matching Descriptor. It's possible to
+change the attribute settings for the default descriptor, so bear in mind that if
+you do that you are altering the defaults for any format that does not link to
+a different one.
+
+
+Header linking
+~~~~~~~~~~~~~~
+
+The UVC specification requires that Format and Frame descriptors be preceded by
+Headers detailing things such as the number and cumulative size of the different
+Format descriptors that follow. This and similar operations are achieved in
+configfs by linking between the configfs item representing the header and the
+config items representing those other descriptors, in this manner:
+
+.. code-block:: bash
+
+       mkdir $FUNCTION/streaming/header/h
+
+       # This section links the format descriptors and their associated frames
+       # to the header
+       cd $FUNCTION/streaming/header/h
+       ln -s ../../uncompressed/yuyv
+       ln -s ../../mjpeg/mjpeg
+
+       # This section ensures that the header will be transmitted for each
+       # speed's set of descriptors. If support for a particular speed is not
+       # needed then it can be skipped here.
+       cd ../../class/fs
+       ln -s ../../header/h
+       cd ../../class/hs
+       ln -s ../../header/h
+       cd ../../class/ss
+       ln -s ../../header/h
+       cd ../../../control
+       mkdir header/h
+       ln -s header/h class/fs
+       ln -s header/h class/ss
+
+
+Extension Unit Support
+~~~~~~~~~~~~~~~~~~~~~~
+
+A UVC Extension Unit (XU) basically provides a distinct unit to which control set
+and get requests can be addressed. The meaning of those control requests is
+entirely implementation dependent, but may be used to control settings outside
+of the UVC specification (for example enabling or disabling video effects). An
+XU can be inserted into the UVC unit chain or left free-hanging.
+
+Configuring an extension unit involves creating an entry in the appropriate
+directory and setting its attributes appropriately, like so:
+
+.. code-block:: bash
+
+       mkdir $FUNCTION/control/extensions/xu.0
+       pushd $FUNCTION/control/extensions/xu.0
+
+       # Set the bUnitID of the Processing Unit as the source for this
+       # Extension Unit
+       echo 2 > baSourceID
+
+       # Set this XU as the source of the default output terminal. This inserts
+       # the XU into the UVC chain between the PU and OT such that the final
+       # chain is IT > PU > XU.0 > OT
+       cat bUnitID > ../../terminal/output/default/baSourceID
+
+       # Flag some controls as being available for use. The bmControl field is
+       # a bitmap with each bit denoting the availability of a particular
+       # control. For example to flag the 0th, 2nd and 3rd controls available:
+       echo 0x0d > bmControls
+
+       # Set the GUID; this is a vendor-specific code identifying the XU.
+       echo -e -n "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" > guidExtensionCode
+
+       popd
+
+The bmControls attribute and the baSourceID attribute are multi-value attributes.
+This means that you may write multiple newline separated values to them. For
+example to flag the 1st, 2nd, 9th and 10th controls as being available you would
+need to write two values to bmControls, like so:
+
+.. code-block:: bash
+
+       cat << EOF > bmControls
+       0x03
+       0x03
+       EOF
+
+The multi-value nature of the baSourceID attribute reflects the fact that XUs
+can be multiple-input, though note that this currently has no significant effect.
+
+The bControlSize attribute reflects the size of the bmControls attribute, and
+similarly bNrInPins reflects the size of the baSourceID attribute. Both
+attributes are automatically increased / decreased as you set bmControls and
+baSourceID. It is also possible to manually increase or decrease bControlSize
+which has the effect of truncating entries to the new size, or padding entries
+out with 0x00, for example:
+
+::
+
+       $ cat bmControls
+       0x03
+       0x05
+
+       $ cat bControlSize
+       2
+
+       $ echo 1 > bControlSize
+       $ cat bmControls
+       0x03
+
+       $ echo 2 > bControlSize
+       $ cat bmControls
+       0x03
+       0x00
+
+bNrInPins and baSourceID function in the same way.
+
+Custom Strings Support
+~~~~~~~~~~~~~~~~~~~~~~
+
+String descriptors that provide a textual description for various parts of a
+USB device can be defined in the usual place within USB configfs, and may then
+be linked to from the UVC function root or from Extension Unit directories to
+assign those strings as descriptors:
+
+.. code-block:: bash
+
+       # Create a string descriptor in us-EN and link to it from the function
+       # root. The name of the link is significant here, as it declares this
+       # descriptor to be intended for the Interface Association Descriptor.
+       # Other significant link names at function root are vs0_desc and vs1_desc
+       # for the VideoStreaming Interface 0/1 Descriptors.
+
+       mkdir -p $GADGET/strings/0x409/iad_desc
+       echo -n "Interface Association Descriptor" > $GADGET/strings/0x409/iad_desc/s
+       ln -s $GADGET/strings/0x409/iad_desc $FUNCTION/iad_desc
+
+       # Because the link to a String Descriptor from an Extension Unit clearly
+       # associates the two, the name of this link is not significant and may
+       # be set freely.
+
+       mkdir -p $GADGET/strings/0x409/xu.0
+       echo -n "A Very Useful Extension Unit" > $GADGET/strings/0x409/xu.0/s
+       ln -s $GADGET/strings/0x409/xu.0 $FUNCTION/control/extensions/xu.0
+
+The interrupt endpoint
+~~~~~~~~~~~~~~~~~~~~~~
+
+The VideoControl interface has an optional interrupt endpoint which is by default
+disabled. This is intended to support delayed response control set requests for
+UVC (which should respond through the interrupt endpoint rather than tying up
+endpoint 0). At present support for sending data through this endpoint is missing
+and so it is left disabled to avoid confusion. If you wish to enable it you can
+do so through the configfs attribute:
+
+.. code-block:: bash
+
+       echo 1 > $FUNCTION/control/enable_interrupt_ep
+
+Bandwidth configuration
+~~~~~~~~~~~~~~~~~~~~~~~
+
+There are three attributes which control the bandwidth of the USB connection.
+These live in the function root and can be set within limits:
+
+.. code-block:: bash
+
+       # streaming_interval sets bInterval. Values range from 1..255
+       echo 1 > $FUNCTION/streaming_interval
+
+       # streaming_maxpacket sets wMaxPacketSize. Valid values are 1024/2048/3072
+       echo 3072 > $FUNCTION/streaming_maxpacket
+
+       # streaming_maxburst sets bMaxBurst. Valid values are 1..15
+       echo 1 > $FUNCTION/streaming_maxburst
+
+
+The values passed here will be clamped to valid values according to the UVC
+specification (which depend on the speed of the USB connection). To understand
+how the settings influence bandwidth you should consult the UVC specifications,
+but a rule of thumb is that increasing the streaming_maxpacket setting will
+improve bandwidth (and thus the maximum possible framerate), whilst the same is
+true for streaming_maxburst provided the USB connection is running at SuperSpeed.
+Increasing streaming_interval will reduce bandwidth and framerate.
+
+The userspace application
+-------------------------
+By itself, the UVC Gadget driver cannot do anything particularly interesting. It
+must be paired with a userspace program that responds to UVC control requests and
+fills buffers to be queued to the V4L2 device that the driver creates. How those
+things are achieved is implementation dependent and beyond the scope of this
+document, but a reference application can be found at https://gitlab.freedesktop.org/camera/uvc-gadget
index b656c9be23ed230af5b499ef68689a9bfdfa5094..27955dad95e12403fbae8dda7c64dd9771fec3bd 100644 (file)
@@ -16,6 +16,7 @@ USB support
     gadget_multi
     gadget_printer
     gadget_serial
+    gadget_uvc
     gadget-testing
     iuu_phoenix
     mass-storage
index 2122e0c4a39987c4fdf86a1d4fbbcb954d1813ce..a22442ba1d30bb4bfeae33bf85a71458d235f7ba 100644 (file)
@@ -24,7 +24,8 @@ YAML specifications can be found under ``Documentation/netlink/specs/``
 This document describes details of the schema.
 See :doc:`intro-specs` for a practical starting guide.
 
-All specs must be licensed under ``GPL-2.0-only OR BSD-3-Clause``
+All specs must be licensed under
+``((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)``
 to allow for easy adoption in user space code.
 
 Compatibility levels
index 48fad65568227a554cd58bb86ce5a5b15d2d95a0..e0c63de8e8374d5645efdd3af61bdcdea7964926 100644 (file)
@@ -6030,6 +6030,44 @@ delivery must be provided via the "reg_aen" struct.
 The "pad" and "reserved" fields may be used for future extensions and should be
 set to 0s by userspace.
 
+4.138 KVM_ARM_SET_COUNTER_OFFSET
+--------------------------------
+
+:Capability: KVM_CAP_COUNTER_OFFSET
+:Architectures: arm64
+:Type: vm ioctl
+:Parameters: struct kvm_arm_counter_offset (in)
+:Returns: 0 on success, < 0 on error
+
+This capability indicates that userspace is able to apply a single VM-wide
+offset to both the virtual and physical counters as viewed by the guest
+using the KVM_ARM_SET_COUNTER_OFFSET ioctl and the following data structure:
+
+::
+
+       struct kvm_arm_counter_offset {
+               __u64 counter_offset;
+               __u64 reserved;
+       };
+
+The offset describes a number of counter cycles that are subtracted from
+both virtual and physical counter views (similar to the effects of the
+CNTVOFF_EL2 and CNTPOFF_EL2 system registers, but only global). The offset
+always applies to all vcpus (already created or created after this ioctl)
+for this VM.
+
+It is userspace's responsibility to compute the offset based, for example,
+on previous values of the guest counters.
+
+Any value other than 0 for the "reserved" field may result in an error
+(-EINVAL) being returned. This ioctl can also return -EBUSY if any vcpu
+ioctl is issued concurrently.
+
+Note that using this ioctl results in KVM ignoring subsequent userspace
+writes to the CNTVCT_EL0 and CNTPCT_EL0 registers using the SET_ONE_REG
+interface. No error will be returned, but the resulting offset will not be
+applied.
+
 5. The kvm_run structure
 ========================
 
@@ -6219,15 +6257,40 @@ to the byte array.
                        __u64 nr;
                        __u64 args[6];
                        __u64 ret;
-                       __u32 longmode;
-                       __u32 pad;
+                       __u64 flags;
                } hypercall;
 
-Unused.  This was once used for 'hypercall to userspace'.  To implement
-such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390).
+
+It is strongly recommended that userspace use ``KVM_EXIT_IO`` (x86) or
+``KVM_EXIT_MMIO`` (all except s390) to implement functionality that
+requires a guest to interact with host userspace.
 
 .. note:: KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO.
 
+For arm64:
+----------
+
+SMCCC exits can be enabled depending on the configuration of the SMCCC
+filter. See the ``KVM_ARM_VM_SMCCC_FILTER`` attribute documented in
+Documentation/virt/kvm/devices/vm.rst for more details.
+
+``nr`` contains the function ID of the guest's SMCCC call. Userspace is
+expected to use the ``KVM_GET_ONE_REG`` ioctl to retrieve the call
+parameters from the vCPU's GPRs.
+
+Definition of ``flags``:
+ - ``KVM_HYPERCALL_EXIT_SMC``: Indicates that the guest used the SMC
+   conduit to initiate the SMCCC call. If this bit is 0 then the guest
+   used the HVC conduit for the SMCCC call.
+
+ - ``KVM_HYPERCALL_EXIT_16BIT``: Indicates that the guest used a 16bit
+   instruction to initiate the SMCCC call. If this bit is 0 then the
+   guest used a 32bit instruction. An AArch64 guest always has this
+   bit set to 0.
+
+At the point of exit, PC points to the instruction immediately following
+the trapping instruction.
+
 ::
 
                /* KVM_EXIT_TPR_ACCESS */
@@ -7267,6 +7330,7 @@ and injected exceptions.
        will clear DR6.RTM.
 
 7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+--------------------------------------
 
 :Architectures: x86, arm64, mips
 :Parameters: args[0] whether feature should be enabled or not
index 147efec626e5218825e36d7e4035e07e4892d86b..9d726e60ec472a6eb3fb0c9453932954e604e6fb 100644 (file)
@@ -321,3 +321,82 @@ Allows userspace to query the status of migration mode.
             if it is enabled
 :Returns:   -EFAULT if the given address is not accessible from kernel space;
            0 in case of success.
+
+6. GROUP: KVM_ARM_VM_SMCCC_CTRL
+===============================
+
+:Architectures: arm64
+
+6.1. ATTRIBUTE: KVM_ARM_VM_SMCCC_FILTER (w/o)
+---------------------------------------------
+
+:Parameters: Pointer to a ``struct kvm_smccc_filter``
+
+:Returns:
+
+        ======  ===========================================
+        EEXIST  Range intersects with a previously inserted
+                or reserved range
+        EBUSY   A vCPU in the VM has already run
+        EINVAL  Invalid filter configuration
+        ENOMEM  Failed to allocate memory for the in-kernel
+                representation of the SMCCC filter
+        ======  ===========================================
+
+Requests the installation of an SMCCC call filter described as follows::
+
+    enum kvm_smccc_filter_action {
+            KVM_SMCCC_FILTER_HANDLE = 0,
+            KVM_SMCCC_FILTER_DENY,
+            KVM_SMCCC_FILTER_FWD_TO_USER,
+    };
+
+    struct kvm_smccc_filter {
+            __u32 base;
+            __u32 nr_functions;
+            __u8 action;
+            __u8 pad[15];
+    };
+
+The filter is defined as a set of non-overlapping ranges. Each
+range defines an action to be applied to SMCCC calls within the range.
+Userspace can insert multiple ranges into the filter by using
+successive calls to this attribute.
+
+The default configuration of KVM is such that all implemented SMCCC
+calls are allowed. Thus, the SMCCC filter can be defined sparsely
+by userspace, only describing ranges that modify the default behavior.
+
+The range expressed by ``struct kvm_smccc_filter`` is
+[``base``, ``base + nr_functions``). The range is not allowed to wrap,
+i.e. userspace cannot rely on ``base + nr_functions`` overflowing.
+
+The SMCCC filter applies to both SMC and HVC calls initiated by the
+guest. The SMCCC filter gates the in-kernel emulation of SMCCC calls
+and as such takes effect before other interfaces that interact with
+SMCCC calls (e.g. hypercall bitmap registers).
+
+Actions:
+
+ - ``KVM_SMCCC_FILTER_HANDLE``: Allows the guest SMCCC call to be
+   handled in-kernel. It is strongly recommended that userspace *not*
+   explicitly describe the allowed SMCCC call ranges.
+
+ - ``KVM_SMCCC_FILTER_DENY``: Rejects the guest SMCCC call in-kernel
+   and returns to the guest.
+
+ - ``KVM_SMCCC_FILTER_FWD_TO_USER``: The guest SMCCC call is forwarded
+   to userspace with an exit reason of ``KVM_EXIT_HYPERCALL``.
+
+The ``pad`` field is reserved for future use and must be zero. KVM may
+return ``-EINVAL`` if the field is nonzero.
+
+KVM reserves the 'Arm Architecture Calls' range of function IDs and
+will reject attempts to define a filter for any portion of these ranges:
+
+        =========== ===============
+        Start       End (inclusive)
+        =========== ===============
+        0x8000_0000 0x8000_FFFF
+        0xC000_0000 0xC000_FFFF
+        =========== ===============
index 14c4e9fa501d6b6e0bcbd12ded0f95e760f49ddd..8c77554e4896340533aa2e37a2dee57b0dd28685 100644 (file)
@@ -21,7 +21,7 @@ The acquisition orders for mutexes are as follows:
 - kvm->mn_active_invalidate_count ensures that pairs of
   invalidate_range_start() and invalidate_range_end() callbacks
   use the same memslots array.  kvm->slots_lock and kvm->slots_arch_lock
-  are taken on the waiting side in install_new_memslots, so MMU notifiers
+  are taken on the waiting side when modifying memslots, so MMU notifiers
   must not take either kvm->slots_lock or kvm->slots_arch_lock.
 
 For SRCU:
index ec57c42ed5440d5d2c6721d30aa0f4a69ec8b44a..1dc8bd26b6cff1541d91ac8bdf9645c2c3d9fb18 100644 (file)
@@ -5971,7 +5971,7 @@ F:        include/linux/dm-*.h
 F:     include/uapi/linux/dm-*.h
 
 DEVLINK
-M:     Jiri Pirko <jiri@nvidia.com>
+M:     Jiri Pirko <jiri@resnulli.us>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/devlink
@@ -14872,12 +14872,12 @@ M:    Sagi Grimberg <sagi@grimberg.me>
 L:     linux-nvme@lists.infradead.org
 S:     Supported
 W:     http://git.infradead.org/nvme.git
-T:     git://git.infradead.org/nvme.git
+T:     git git://git.infradead.org/nvme.git
 F:     Documentation/nvme/
-F:     drivers/nvme/host/
 F:     drivers/nvme/common/
-F:     include/linux/nvme.h
+F:     drivers/nvme/host/
 F:     include/linux/nvme-*.h
+F:     include/linux/nvme.h
 F:     include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS FABRICS AUTHENTICATION
@@ -14912,7 +14912,7 @@ M:      Chaitanya Kulkarni <kch@nvidia.com>
 L:     linux-nvme@lists.infradead.org
 S:     Supported
 W:     http://git.infradead.org/nvme.git
-T:     git://git.infradead.org/nvme.git
+T:     git git://git.infradead.org/nvme.git
 F:     drivers/nvme/target/
 
 NVMEM FRAMEWORK
@@ -15079,7 +15079,7 @@ F:      Documentation/hwmon/nzxt-smart2.rst
 F:     drivers/hwmon/nzxt-smart2.c
 
 OBJAGG
-M:     Jiri Pirko <jiri@nvidia.com>
+M:     Jiri Pirko <jiri@resnulli.us>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     include/linux/objagg.h
@@ -15853,7 +15853,7 @@ F:      drivers/video/logo/logo_parisc*
 F:     include/linux/hp_sdc.h
 
 PARMAN
-M:     Jiri Pirko <jiri@nvidia.com>
+M:     Jiri Pirko <jiri@resnulli.us>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     include/linux/parman.h
@@ -17990,7 +17990,7 @@ F:      Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
 F:     Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
 F:     arch/riscv/boot/dts/microchip/
 F:     drivers/char/hw_random/mpfs-rng.c
-F:     drivers/clk/microchip/clk-mpfs.c
+F:     drivers/clk/microchip/clk-mpfs*.c
 F:     drivers/i2c/busses/i2c-microchip-corei2c.c
 F:     drivers/mailbox/mailbox-mpfs.c
 F:     drivers/pci/controller/pcie-microchip-host.c
@@ -19150,9 +19150,7 @@ W:      http://www.brownhat.org/sis900.html
 F:     drivers/net/ethernet/sis/sis900.*
 
 SIS FRAMEBUFFER DRIVER
-M:     Thomas Winischhofer <thomas@winischhofer.net>
-S:     Maintained
-W:     http://www.winischhofer.net/linuxsisvga.shtml
+S:     Orphan
 F:     Documentation/fb/sisfb.rst
 F:     drivers/video/fbdev/sis/
 F:     include/video/sisfb.h
@@ -21645,6 +21643,7 @@ USB OVER IP DRIVER
 M:     Valentina Manea <valentina.manea.m@gmail.com>
 M:     Shuah Khan <shuah@kernel.org>
 M:     Shuah Khan <skhan@linuxfoundation.org>
+R:     Hongren Zheng <i@zenithal.me>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/usb/usbip_protocol.rst
index c933ceb4f21db15d9789f8e9fd12a931174c121f..da2586d4c72815ceccd9e03ebe9a9d11b079c106 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -274,8 +274,7 @@ no-dot-config-targets := $(clean-targets) \
                         cscope gtags TAGS tags help% %docs check% coccicheck \
                         $(version_h) headers headers_% archheaders archscripts \
                         %asm-generic kernelversion %src-pkg dt_binding_check \
-                        outputmakefile rustavailable rustfmt rustfmtcheck \
-                        scripts_package
+                        outputmakefile rustavailable rustfmt rustfmtcheck
 # Installation targets should not require compiler. Unfortunately, vdso_install
 # is an exception where build artifacts may be updated. This must be fixed.
 no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
@@ -1605,7 +1604,7 @@ MRPROPER_FILES += include/config include/generated          \
                  certs/signing_key.pem \
                  certs/x509.genkey \
                  vmlinux-gdb.py \
-                 *.spec \
+                 *.spec rpmbuild \
                  rust/libmacros.so
 
 # clean - Delete most, but leave enough to build external modules
@@ -1656,10 +1655,6 @@ distclean: mrproper
 %pkg: include/config/kernel.release FORCE
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@
 
-PHONY += scripts_package
-scripts_package: scripts_basic
-       $(Q)$(MAKE) $(build)=scripts scripts/list-gitignored
-
 # Brief documentation of the typical targets used
 # ---------------------------------------------------------------------------
 
@@ -1886,6 +1881,8 @@ endif
 
 else # KBUILD_EXTMOD
 
+filechk_kernel.release = echo $(KERNELRELEASE)
+
 ###
 # External module support.
 # When building external modules the kernel used as basis is considered
index 94944cc219317fd43ab32971e0936f9dbb501de7..dd03e3860f97f901524570a89552adee135accce 100644 (file)
 
 &usbotg1 {
        pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usbotg1>;
        disable-over-current;
        srp-disable;
        hnp-disable;
index ace3eb8a97b87c4002be0db5e21cb294d9aeccb1..4e1bf080eaca012ae0d1f9e6e74696eaea41da28 100644 (file)
 
 &usbotg1 {
        pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usbotg1>;
        disable-over-current;
        srp-disable;
        hnp-disable;
index da1399057634a040321139407b0e3afecbddd149..815119c12bd48286df5bf2f0d936508d994c08a1 100644 (file)
 
 &usbotg1 {
        pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usbotg1>;
        disable-over-current;
        srp-disable;
        hnp-disable;
index de2fb1c01b6e348ea188f1d34b51baf045cd8774..b82381229adf6ebdb1533af643e781a6717e6791 100644 (file)
        };
 
        reserved-memory {
+               sbl_region: sbl@2f00000 {
+                       reg = <0x02f00000 0x100000>;
+                       no-map;
+               };
+
+               external_image_region: external-image@3100000 {
+                       reg = <0x03100000 0x200000>;
+                       no-map;
+               };
+
                adsp_region: adsp@3300000 {
                        reg = <0x03300000 0x1400000>;
                        no-map;
index 14eecaaf295fabacb68ac687912eb8f8c1da7a46..e4c2677cc1e9e45cac842f71d88267cb3cce32ac 100644 (file)
@@ -116,7 +116,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
                        tocopy = n;
 
                ua_flags = uaccess_save_and_enable();
-               memcpy((void *)to, from, tocopy);
+               __memcpy((void *)to, from, tocopy);
                uaccess_restore(ua_flags);
                to += tocopy;
                from += tocopy;
@@ -178,7 +178,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
                        tocopy = n;
 
                ua_flags = uaccess_save_and_enable();
-               memset((void *)addr, 0, tocopy);
+               __memset((void *)addr, 0, tocopy);
                uaccess_restore(ua_flags);
                addr += tocopy;
                n -= tocopy;
index af9194eca5564df46b71cd51549a5e2898d75bf8..73eb6061c73eeaa475ef80812c2371476da36210 100644 (file)
 };
 
 &enetc_port2 {
-       nvmem-cells = <&base_mac_address 2>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
 &enetc_port3 {
-       nvmem-cells = <&base_mac_address 3>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
@@ -84,8 +80,6 @@
        managed = "in-band-status";
        phy-handle = <&qsgmii_phy0>;
        phy-mode = "qsgmii";
-       nvmem-cells = <&base_mac_address 4>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
@@ -94,8 +88,6 @@
        managed = "in-band-status";
        phy-handle = <&qsgmii_phy1>;
        phy-mode = "qsgmii";
-       nvmem-cells = <&base_mac_address 5>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
        managed = "in-band-status";
        phy-handle = <&qsgmii_phy2>;
        phy-mode = "qsgmii";
-       nvmem-cells = <&base_mac_address 6>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
        managed = "in-band-status";
        phy-handle = <&qsgmii_phy3>;
        phy-mode = "qsgmii";
-       nvmem-cells = <&base_mac_address 7>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
index 1f34c75534594ce25b440ee6d5dae1be0b3168ec..7cd29ab970d9237ee4675c87ee6f39b279aae2ba 100644 (file)
@@ -55,7 +55,5 @@
 &enetc_port1 {
        phy-handle = <&phy0>;
        phy-mode = "rgmii-id";
-       nvmem-cells = <&base_mac_address 0>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
index aac41192caa12801d853f96d71adee7a2599e808..113b1df74bf87cdebf0607ef3020c3e2b35ed6b2 100644 (file)
 };
 
 &enetc_port2 {
-       nvmem-cells = <&base_mac_address 2>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
 &enetc_port3 {
-       nvmem-cells = <&base_mac_address 3>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
@@ -56,8 +52,6 @@
        managed = "in-band-status";
        phy-handle = <&phy0>;
        phy-mode = "sgmii";
-       nvmem-cells = <&base_mac_address 0>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
@@ -66,8 +60,6 @@
        managed = "in-band-status";
        phy-handle = <&phy1>;
        phy-mode = "sgmii";
-       nvmem-cells = <&base_mac_address 1>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
index a4421db3784e325602ac89c9c72d28b34418d6ce..9b5e92fb753e22effdb5b14f04cf3371291df162 100644 (file)
@@ -43,7 +43,5 @@
 &enetc_port1 {
        phy-handle = <&phy1>;
        phy-mode = "rgmii-id";
-       nvmem-cells = <&base_mac_address 1>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
index 8b65af4a7147b6520907e282171bde5611acdffb..4ab17b984b03bc77e2243cbbfffb4d19ef3f8a4f 100644 (file)
@@ -92,8 +92,6 @@
        phy-handle = <&phy0>;
        phy-mode = "sgmii";
        managed = "in-band-status";
-       nvmem-cells = <&base_mac_address 0>;
-       nvmem-cell-names = "mac-address";
        status = "okay";
 };
 
                                label = "bootloader environment";
                        };
                };
-
-               otp-1 {
-                       compatible = "user-otp";
-
-                       nvmem-layout {
-                               compatible = "kontron,sl28-vpd";
-
-                               serial_number: serial-number {
-                               };
-
-                               base_mac_address: base-mac-address {
-                                       #nvmem-cell-cells = <1>;
-                               };
-                       };
-               };
        };
 };
 
index 1f3d225e64ece966390d4bdc967f538034a4d977..06b94bbc2b97d316188aec7fdf75d013d8d485ac 100644 (file)
@@ -117,7 +117,7 @@ lsio_subsys: bus@5d000000 {
                interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>,
                         <&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>;
-               clock-names = "fspi", "fspi_en";
+               clock-names = "fspi_en", "fspi";
                power-domains = <&pd IMX_SC_R_FSPI_0>;
                status = "disabled";
        };
index 1bcf228a22b8b3d21585fe338cb2eafa5127830f..852420349c01332544247d5de35acdfd0ee71d82 100644 (file)
        phy-handle = <&ethphy0>;
        nvmem-cells = <&fec_mac1>;
        nvmem-cell-names = "mac-address";
-       snps,reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
-       snps,reset-delays-us = <10 20 200000>;
        status = "okay";
 
        mdio {
                        eee-broken-1000t;
                        qca,disable-smarteee;
                        qca,disable-hibernation-mode;
+                       reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
+                       reset-assert-us = <20>;
+                       reset-deassert-us = <200000>;
                        vddio-supply = <&vddio0>;
 
                        vddio0: vddio-regulator {
index 6357078185eddb44d64d4ce0cbb4b89231a55111..0e8f0d7161ad0f4989c91149bbb29f20d000a45e 100644 (file)
                compatible = "wlf,wm8960";
                reg = <0x1a>;
                clocks = <&clk IMX8MM_CLK_SAI1_ROOT>;
-               clock-names = "mclk1";
+               clock-names = "mclk";
                wlf,shared-lrclk;
                #sound-dai-cells = <0>;
        };
index ed9ac6c5047c087565adbb8ed406c6677ec2fe22..9e0ddd6b7a322d269658c8a886fe1df9a59e9214 100644 (file)
                                sai2: sai@30020000 {
                                        compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
                                        reg = <0x30020000 0x10000>;
+                                       #sound-dai-cells = <0>;
                                        interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
                                        clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
                                                <&clk IMX8MN_CLK_DUMMY>,
                                sai3: sai@30030000 {
                                        compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
                                        reg = <0x30030000 0x10000>;
+                                       #sound-dai-cells = <0>;
                                        interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
                                        clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
                                                 <&clk IMX8MN_CLK_DUMMY>,
                                sai5: sai@30050000 {
                                        compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
                                        reg = <0x30050000 0x10000>;
+                                       #sound-dai-cells = <0>;
                                        interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
                                        clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
                                                 <&clk IMX8MN_CLK_DUMMY>,
                                sai6: sai@30060000 {
                                        compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
                                        reg = <0x30060000  0x10000>;
+                                       #sound-dai-cells = <0>;
                                        interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
                                        clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
                                                 <&clk IMX8MN_CLK_DUMMY>,
                                sai7: sai@300b0000 {
                                        compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
                                        reg = <0x300b0000 0x10000>;
+                                       #sound-dai-cells = <0>;
                                        interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
                                        clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
                                                 <&clk IMX8MN_CLK_DUMMY>,
index a19224fe1a6adf15fdbabb87baffcd4c7fb359db..2dd60e3252f356b07abb073363f1e7f50449f8df 100644 (file)
                                reg = <0x32e90000 0x238>;
                                interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT>,
-                                        <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>,
-                                        <&clk IMX8MP_CLK_MEDIA_APB_ROOT>;
+                                        <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
+                                        <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
                                clock-names = "pix", "axi", "disp_axi";
                                assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
                                                  <&clk IMX8MP_VIDEO_PLL1>;
index 2076f9c9983ad0317c5881cc498cbe7a6c532ff3..41efd97dd6d6dcee0318607608b63faf0f3c6ff6 100644 (file)
                        lpi2c1: i2c@44340000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x44340000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C1_GATE>,
                                         <&clk IMX93_CLK_BUS_AON>;
                        lpi2c2: i2c@44350000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x44350000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C2_GATE>,
                                         <&clk IMX93_CLK_BUS_AON>;
                        lpi2c3: i2c@42530000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x42530000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C3_GATE>,
                                         <&clk IMX93_CLK_BUS_WAKEUP>;
                        lpi2c4: i2c@42540000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x42540000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C4_GATE>,
                                         <&clk IMX93_CLK_BUS_WAKEUP>;
                        lpi2c5: i2c@426b0000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x426b0000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C5_GATE>,
                                         <&clk IMX93_CLK_BUS_WAKEUP>;
                        lpi2c6: i2c@426c0000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x426c0000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C6_GATE>,
                                         <&clk IMX93_CLK_BUS_WAKEUP>;
                        lpi2c7: i2c@426d0000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x426d0000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C7_GATE>,
                                         <&clk IMX93_CLK_BUS_WAKEUP>;
                        lpi2c8: i2c@426e0000 {
                                compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
                                reg = <0x426e0000 0x10000>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
                                interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX93_CLK_LPI2C8_GATE>,
                                         <&clk IMX93_CLK_BUS_WAKEUP>;
                        eqos: ethernet@428a0000 {
                                compatible = "nxp,imx93-dwmac-eqos", "snps,dwmac-5.10a";
                                reg = <0x428a0000 0x10000>;
-                               interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
-                                            <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-                               interrupt-names = "eth_wake_irq", "macirq";
+                               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-names = "macirq", "eth_wake_irq";
                                clocks = <&clk IMX93_CLK_ENET_QOS_GATE>,
                                         <&clk IMX93_CLK_ENET_QOS_GATE>,
                                         <&clk IMX93_CLK_ENET_TIMER2>,
                                                         <&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
                                assigned-clock-rates = <100000000>, <250000000>;
                                intf_mode = <&wakeupmix_gpr 0x28>;
-                               clk_csr = <0>;
+                               snps,clk-csr = <0>;
                                status = "disabled";
                        };
 
index 133dbe5b429d8db13a26cc703bde55f17b9c588c..7096b999b33f8a9ca9ac2b7b4de9cefede4f98c8 100644 (file)
@@ -22,7 +22,7 @@
 
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges = <0x0 0x0 0x0 0x0 0x0 0x40000000>;
+               ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
 
                apbmisc: misc@100000 {
                        compatible = "nvidia,tegra194-misc";
index 8fe8eda7654d878387d5a2a25f75009045e056d5..f1748cff8a33ba6c1261ef6532b8fb8324f19958 100644 (file)
@@ -20,7 +20,7 @@
 
                #address-cells = <2>;
                #size-cells = <2>;
-               ranges = <0x0 0x0 0x0 0x0 0x0 0x40000000>;
+               ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
 
                misc@100000 {
                        compatible = "nvidia,tegra234-misc";
index c492db8561904e0deef7751dd2d5e48d076d56a1..82e260375174d1b86608db45876ff363ff1acd52 100644 (file)
@@ -33,7 +33,3 @@
 &gpio_leds_default {
        pins = "gpio81", "gpio82", "gpio83";
 };
-
-&sim_ctrl_default {
-       pins = "gpio1", "gpio2";
-};
index 700cf81cbf8c0cb512d0bdabc8e70d5f90ce3eab..8433c9710b1cfc11a77e319e0f70533beb0c213c 100644 (file)
        gpios = <&msmgpio 20 GPIO_ACTIVE_HIGH>;
 };
 
+&mpss {
+       pinctrl-0 = <&sim_ctrl_default>;
+       pinctrl-names = "default";
+};
+
 &button_default {
        pins = "gpio37";
        bias-pull-down;
        pins = "gpio20", "gpio21", "gpio22";
 };
 
-&sim_ctrl_default {
-       pins = "gpio1", "gpio2";
+/* This selects the external SIM card slot by default */
+&msmgpio {
+       sim_ctrl_default: sim-ctrl-default-state {
+               esim-sel-pins {
+                       pins = "gpio0", "gpio3";
+                       bias-disable;
+                       output-low;
+               };
+
+               sim-en-pins {
+                       pins = "gpio1";
+                       bias-disable;
+                       output-low;
+               };
+
+               sim-sel-pins {
+                       pins = "gpio2";
+                       bias-disable;
+                       output-high;
+               };
+       };
 };
index 790a9696da9de7457ba1fa7b1caff90ff0adb505..cdf34b74fa8faa132c0bad1cfd7b6efac40bd380 100644 (file)
@@ -92,9 +92,6 @@
 };
 
 &mpss {
-       pinctrl-0 = <&sim_ctrl_default>;
-       pinctrl-names = "default";
-
        status = "okay";
 };
 
                drive-strength = <2>;
                bias-disable;
        };
-
-       sim_ctrl_default: sim-ctrl-default-state {
-               function = "gpio";
-               drive-strength = <2>;
-               bias-disable;
-               output-low;
-       };
 };
index 3ccb5ffdb3ca3f29bbc636026f7944f1c2494ec2..24fa449d48a6640a0753366c95b947f02c51b431 100644 (file)
 };
 
 &remoteproc_nsp0 {
-       firmware-name = "qcom/sa8540p/cdsp.mbn";
+       firmware-name = "qcom/sa8540p/cdsp0.mbn";
        status = "okay";
 };
 
index bdcb74925313042d218163cb2b4fa01cf6488da3..8f4ab6bd28864fc680d9fcc8da96d2a770f80c8d 100644 (file)
                        pinctrl-names = "default";
                        pinctrl-0 = <&pcie1_clkreq_n>;
 
+                       dma-coherent;
+
                        iommus = <&apps_smmu 0x1c80 0x1>;
 
                        iommu-map = <0x0 &apps_smmu 0x1c80 0x1>,
index 98e71b933437c91693d91654c9598869e24773ec..99c6d6574559fbfb0b73ffb3cdca89450f29db62 100644 (file)
                        regulator-min-microvolt = <1800000>;
                        regulator-max-microvolt = <1800000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+                       regulator-always-on;
                };
 
                vreg_s11b: smps11 {
                        regulator-min-microvolt = <1272000>;
                        regulator-max-microvolt = <1272000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+                       regulator-always-on;
                };
 
                vreg_s12b: smps12 {
                        regulator-min-microvolt = <984000>;
                        regulator-max-microvolt = <984000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+                       regulator-always-on;
                };
 
                vreg_l3b: ldo3 {
                        regulator-min-microvolt = <3008000>;
                        regulator-max-microvolt = <3960000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+                       regulator-always-on;
                };
        };
 
        pmic-die-temp@3 {
                reg = <PMK8350_ADC7_DIE_TEMP>;
                qcom,pre-scaling = <1 1>;
+               label = "pmk8350_die_temp";
        };
 
        xo-therm@44 {
                reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "pmk8350_xo_therm";
        };
 
        pmic-die-temp@103 {
                reg = <PM8350_ADC7_DIE_TEMP(1)>;
                qcom,pre-scaling = <1 1>;
+               label = "pmc8280_1_die_temp";
        };
 
        sys-therm@144 {
                reg = <PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm1";
        };
 
        sys-therm@145 {
                reg = <PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm2";
        };
 
        sys-therm@146 {
                reg = <PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm3";
        };
 
        sys-therm@147 {
                reg = <PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm4";
        };
 
        pmic-die-temp@303 {
                reg = <PM8350_ADC7_DIE_TEMP(3)>;
                qcom,pre-scaling = <1 1>;
+               label = "pmc8280_2_die_temp";
        };
 
        sys-therm@344 {
                reg = <PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm5";
        };
 
        sys-therm@345 {
                reg = <PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm6";
        };
 
        sys-therm@346 {
                reg = <PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm7";
        };
 
        sys-therm@347 {
                reg = <PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
                qcom,hw-settle-time = <200>;
                qcom,ratiometric;
+               label = "sys_therm8";
        };
 
        pmic-die-temp@403 {
                reg = <PMR735A_ADC7_DIE_TEMP>;
                qcom,pre-scaling = <1 1>;
+               label = "pmr735a_die_temp";
        };
 };
 
                "VA DMIC0", "MIC BIAS1",
                "VA DMIC1", "MIC BIAS1",
                "VA DMIC2", "MIC BIAS3",
-               "TX DMIC0", "MIC BIAS1",
-               "TX DMIC1", "MIC BIAS2",
-               "TX DMIC2", "MIC BIAS3",
+               "VA DMIC0", "VA MIC BIAS1",
+               "VA DMIC1", "VA MIC BIAS1",
+               "VA DMIC2", "VA MIC BIAS3",
                "TX SWR_ADC1", "ADC2_OUTPUT";
 
        wcd-playback-dai-link {
        va-dai-link {
                link-name = "VA Capture";
                cpu {
-                       sound-dai = <&q6apmbedai TX_CODEC_DMA_TX_3>;
+                       sound-dai = <&q6apmbedai VA_CODEC_DMA_TX_0>;
                };
 
                platform {
 
        vdd-micb-supply = <&vreg_s10b>;
 
-       qcom,dmic-sample-rate = <600000>;
+       qcom,dmic-sample-rate = <4800000>;
 
        status = "okay";
 };
index 0d02599d8867227eb22c7594dbfeb4da34fc12b3..42bfa9fa5b9675b799eef46540de42388eafb0c7 100644 (file)
                        qcom,ports-sinterval-low =      /bits/ 8 <0x03 0x1f 0x1f 0x07 0x00>;
                        qcom,ports-offset1 =            /bits/ 8 <0x00 0x00 0x0B 0x01 0x00>;
                        qcom,ports-offset2 =            /bits/ 8 <0x00 0x00 0x0B 0x00 0x00>;
-                       qcom,ports-hstart =             /bits/ 8 <0xff 0x03 0xff 0xff 0xff>;
-                       qcom,ports-hstop =              /bits/ 8 <0xff 0x06 0xff 0xff 0xff>;
+                       qcom,ports-hstart =             /bits/ 8 <0xff 0x03 0x00 0xff 0xff>;
+                       qcom,ports-hstop =              /bits/ 8 <0xff 0x06 0x0f 0xff 0xff>;
                        qcom,ports-word-length =        /bits/ 8 <0x01 0x07 0x04 0xff 0xff>;
-                       qcom,ports-block-pack-mode =    /bits/ 8 <0xff 0x00 0x01 0xff 0xff>;
+                       qcom,ports-block-pack-mode =    /bits/ 8 <0xff 0xff 0x01 0xff 0xff>;
                        qcom,ports-lane-control =       /bits/ 8 <0x01 0x00 0x00 0x00 0x00>;
-                       qcom,ports-block-group-count =  /bits/ 8 <0xff 0xff 0xff 0xff 0x00>;
+                       qcom,ports-block-group-count =  /bits/ 8 <0xff 0xff 0xff 0xff 0xff>;
 
                        #sound-dai-cells = <1>;
                        #address-cells = <2>;
                                              <&intc GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "core", "wake";
 
-                       clocks = <&vamacro>;
+                       clocks = <&txmacro>;
                        clock-names = "iface";
                        label = "TX";
                        #sound-dai-cells = <1>;
 
                        qcom,din-ports = <4>;
                        qcom,dout-ports = <0>;
-                       qcom,ports-sinterval-low =      /bits/ 8 <0x01 0x03 0x03 0x03>;
-                       qcom,ports-offset1 =            /bits/ 8 <0x01 0x00 0x02 0x01>;
+                       qcom,ports-sinterval-low =      /bits/ 8 <0x01 0x01 0x03 0x03>;
+                       qcom,ports-offset1 =            /bits/ 8 <0x01 0x00 0x02 0x00>;
                        qcom,ports-offset2 =            /bits/ 8 <0x00 0x00 0x00 0x00>;
                        qcom,ports-block-pack-mode =    /bits/ 8 <0xff 0xff 0xff 0xff>;
                        qcom,ports-hstart =             /bits/ 8 <0xff 0xff 0xff 0xff>;
                        qcom,ports-hstop =              /bits/ 8 <0xff 0xff 0xff 0xff>;
-                       qcom,ports-word-length =        /bits/ 8 <0xff 0x00 0xff 0xff>;
+                       qcom,ports-word-length =        /bits/ 8 <0xff 0xff 0xff 0xff>;
                        qcom,ports-block-group-count =  /bits/ 8 <0xff 0xff 0xff 0xff>;
-                       qcom,ports-lane-control =       /bits/ 8 <0x00 0x01 0x00 0x00>;
+                       qcom,ports-lane-control =       /bits/ 8 <0x00 0x01 0x00 0x01>;
 
                        status = "disabled";
                };
index 4d6ec815b78b11b89c0087e0d6a1f94bfa6031d4..fbd67d2c8d78133dc5a44e22f91a328e1868cb71 100644 (file)
                                dma-names = "tx", "rx";
                                #address-cells = <1>;
                                #size-cells = <0>;
+                               status = "disabled";
                        };
                };
 
index 31b88c738510278eeab6aee842aabda505449fb3..068ee4f724855a256d8efb92f4f30e1d3dc4cd0f 100644 (file)
                        clock-names = "xo";
 
                        power-domains = <&rpmpd SM6375_VDDCX>;
+                       power-domain-names = "cx";
 
                        memory-region = <&pil_cdsp_mem>;
 
index fd20096cfc6e30f9deb5be005346deb38c012494..13e0ce8286061c7f5818c2cc08a022f4102ec28f 100644 (file)
                                      "slave_q2a",
                                      "tbu";
 
-                       iommus = <&apps_smmu 0x1d80 0x7f>;
+                       iommus = <&apps_smmu 0x1d80 0x3f>;
                        iommu-map = <0x0   &apps_smmu 0x1d80 0x1>,
                                    <0x100 &apps_smmu 0x1d81 0x1>;
 
                        assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
                        assigned-clock-rates = <19200000>;
 
-                       iommus = <&apps_smmu 0x1e00 0x7f>;
+                       iommus = <&apps_smmu 0x1e00 0x3f>;
                        iommu-map = <0x0   &apps_smmu 0x1e00 0x1>,
                                    <0x100 &apps_smmu 0x1e01 0x1>;
 
index acaa99c5ff8b11842e42d44258b0f053486854df..a85d47f7a9e82833dbed82f965417b938c0cd2f0 100644 (file)
 };
 
 &venus {
-       firmware-name = "qcom/sm8250/elish/venus.mbn";
+       firmware-name = "qcom/sm8250/xiaomi/elish/venus.mbn";
        status = "okay";
 };
index 1c97e28da6ad85467e99eab74e520e05be1daa15..1a5a612d4234b8a6662cab0ad3850b85cfe116ba 100644 (file)
                        power-domains = <&gcc UFS_PHY_GDSC>;
 
                        iommus = <&apps_smmu 0xe0 0x0>;
+                       dma-coherent;
 
                        clock-names =
                                "core_clk",
index 1a744a33bcf4bbdd1fc0595669777c09a41cf67f..b285b1530c109a4e52d6e0298ed0b158312cc9f6 100644 (file)
                                 <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
                                 <&vamacro>;
                        clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
-                       assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
-                                         <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+                       assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+                                         <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
                        assigned-clock-rates = <19200000>, <19200000>;
 
                        #clock-cells = <0>;
                        power-domains = <&gcc UFS_PHY_GDSC>;
 
                        iommus = <&apps_smmu 0xe0 0x0>;
+                       dma-coherent;
 
                        interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
                                        <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
index ff4d342c072571079dbfeb8694531a73ba5dde92..5d0888398b3c377f7504f6c4f10394e34fb2e666 100644 (file)
@@ -66,7 +66,7 @@
 
                CPU0: cpu@0 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-a510";
                        reg = <0 0>;
                        enable-method = "psci";
                        next-level-cache = <&L2_0>;
@@ -89,7 +89,7 @@
 
                CPU1: cpu@100 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-a510";
                        reg = <0 0x100>;
                        enable-method = "psci";
                        next-level-cache = <&L2_100>;
 
                CPU2: cpu@200 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-a510";
                        reg = <0 0x200>;
                        enable-method = "psci";
                        next-level-cache = <&L2_200>;
 
                CPU3: cpu@300 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-a715";
                        reg = <0 0x300>;
                        enable-method = "psci";
                        next-level-cache = <&L2_300>;
 
                CPU4: cpu@400 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-a715";
                        reg = <0 0x400>;
                        enable-method = "psci";
                        next-level-cache = <&L2_400>;
 
                CPU5: cpu@500 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-a710";
                        reg = <0 0x500>;
                        enable-method = "psci";
                        next-level-cache = <&L2_500>;
 
                CPU6: cpu@600 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-a710";
                        reg = <0 0x600>;
                        enable-method = "psci";
                        next-level-cache = <&L2_600>;
 
                CPU7: cpu@700 {
                        device_type = "cpu";
-                       compatible = "qcom,kryo";
+                       compatible = "arm,cortex-x3";
                        reg = <0 0x700>;
                        enable-method = "psci";
                        next-level-cache = <&L2_700>;
                        required-opps = <&rpmhpd_opp_nom>;
 
                        iommus = <&apps_smmu 0x60 0x0>;
+                       dma-coherent;
 
                        interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
                                        <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
                lpass_tlmm: pinctrl@6e80000 {
                        compatible = "qcom,sm8550-lpass-lpi-pinctrl";
                        reg = <0 0x06e80000 0 0x20000>,
-                             <0 0x0725a000 0 0x10000>;
+                             <0 0x07250000 0 0x10000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        gpio-ranges = <&lpass_tlmm 0 0 23>;
                                pins = "gpio28", "gpio29";
                                function = "qup1_se0";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c1_data_clk: qup-i2c1-data-clk-state {
                                pins = "gpio32", "gpio33";
                                function = "qup1_se1";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c2_data_clk: qup-i2c2-data-clk-state {
                                pins = "gpio36", "gpio37";
                                function = "qup1_se2";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c3_data_clk: qup-i2c3-data-clk-state {
                                pins = "gpio40", "gpio41";
                                function = "qup1_se3";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c4_data_clk: qup-i2c4-data-clk-state {
                                pins = "gpio44", "gpio45";
                                function = "qup1_se4";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c5_data_clk: qup-i2c5-data-clk-state {
                                pins = "gpio52", "gpio53";
                                function = "qup1_se5";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c6_data_clk: qup-i2c6-data-clk-state {
                                pins = "gpio48", "gpio49";
                                function = "qup1_se6";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c8_data_clk: qup-i2c8-data-clk-state {
                                        pins = "gpio57";
                                        function = "qup2_se0_l1_mira";
                                        drive-strength = <2>;
-                                       bias-pull-up;
+                                       bias-pull-up = <2200>;
                                };
 
                                sda-pins {
                                        pins = "gpio56";
                                        function = "qup2_se0_l0_mira";
                                        drive-strength = <2>;
-                                       bias-pull-up;
+                                       bias-pull-up = <2200>;
                                };
                        };
 
                                pins = "gpio60", "gpio61";
                                function = "qup2_se1";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c10_data_clk: qup-i2c10-data-clk-state {
                                pins = "gpio64", "gpio65";
                                function = "qup2_se2";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c11_data_clk: qup-i2c11-data-clk-state {
                                pins = "gpio68", "gpio69";
                                function = "qup2_se3";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c12_data_clk: qup-i2c12-data-clk-state {
                                pins = "gpio2", "gpio3";
                                function = "qup2_se4";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c13_data_clk: qup-i2c13-data-clk-state {
                                pins = "gpio80", "gpio81";
                                function = "qup2_se5";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_i2c15_data_clk: qup-i2c15-data-clk-state {
                                pins = "gpio72", "gpio106";
                                function = "qup2_se7";
                                drive-strength = <2>;
-                               bias-pull-up;
+                               bias-pull-up = <2200>;
                        };
 
                        qup_spi0_cs: qup-spi0-cs-state {
index 6f7b218a681f0857c764789466c7f6dca32e0a0f..b9e36611734fdf21a0a575a73bee8e46faf70a92 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/types.h>
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
+#include <linux/maple_tree.h>
 #include <linux/percpu.h>
 #include <linux/psci.h>
 #include <asm/arch_gicv3.h>
@@ -199,6 +200,9 @@ struct kvm_arch {
        /* Mandated version of PSCI */
        u32 psci_version;
 
+       /* Protects VM-scoped configuration data */
+       struct mutex config_lock;
+
        /*
         * If we encounter a data abort without valid instruction syndrome
         * information, report this to user space.  User space can (and
@@ -221,7 +225,12 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_EL1_32BIT                                4
        /* PSCI SYSTEM_SUSPEND enabled for the guest */
 #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED           5
-
+       /* VM counter offset */
+#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET                        6
+       /* Timer PPIs made immutable */
+#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE             7
+       /* SMCCC filter initialized for the VM */
+#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED          8
        unsigned long flags;
 
        /*
@@ -242,6 +251,7 @@ struct kvm_arch {
 
        /* Hypercall features firmware registers' descriptor */
        struct kvm_smccc_features smccc_feat;
+       struct maple_tree smccc_filter;
 
        /*
         * For an untrusted host VM, 'pkvm.handle' is used to lookup
@@ -365,6 +375,10 @@ enum vcpu_sysreg {
        TPIDR_EL2,      /* EL2 Software Thread ID Register */
        CNTHCTL_EL2,    /* Counter-timer Hypervisor Control register */
        SP_EL2,         /* EL2 Stack Pointer */
+       CNTHP_CTL_EL2,
+       CNTHP_CVAL_EL2,
+       CNTHV_CTL_EL2,
+       CNTHV_CVAL_EL2,
 
        NR_SYS_REGS     /* Nothing after this line! */
 };
@@ -522,6 +536,7 @@ struct kvm_vcpu_arch {
 
        /* vcpu power state */
        struct kvm_mp_state mp_state;
+       spinlock_t mp_state_lock;
 
        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;
@@ -922,6 +937,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
 
 int __init kvm_sys_reg_table_init(void);
 
+bool lock_all_vcpus(struct kvm *kvm);
+void unlock_all_vcpus(struct kvm *kvm);
+
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
@@ -1007,6 +1025,8 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 
 int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                               struct kvm_arm_copy_mte_tags *copy_tags);
+int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
+                                   struct kvm_arm_counter_offset *offset);
 
 /* Guest/host FPSIMD coordination helpers */
 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
@@ -1061,6 +1081,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
        (system_supports_32bit_el0() &&                         \
         !static_branch_unlikely(&arm64_mismatched_32bit_el0))
 
+#define kvm_vm_has_ran_once(kvm)                                       \
+       (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
+
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 extern phys_addr_t hyp_mem_base;
index 083cc47dca086a272cd80e04a50a206aeff9fb09..27e63c111f78abc375a5dfbbf22cdfd565fadfe4 100644 (file)
@@ -63,6 +63,7 @@
  * specific registers encoded in the instructions).
  */
 .macro kern_hyp_va     reg
+#ifndef __KVM_VHE_HYPERVISOR__
 alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
        and     \reg, \reg, #1          /* mask with va_mask */
        ror     \reg, \reg, #1          /* rotate to the first tag bit */
@@ -70,6 +71,7 @@ alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
        add     \reg, \reg, #0, lsl 12  /* insert the top 12 bits of the tag */
        ror     \reg, \reg, #63         /* rotate back */
 alternative_cb_end
+#endif
 .endm
 
 /*
@@ -127,6 +129,7 @@ void kvm_apply_hyp_relocations(void);
 
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
+#ifndef __KVM_VHE_HYPERVISOR__
        asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
                                    "ror %0, %0, #1\n"
                                    "add %0, %0, #0\n"
@@ -135,6 +138,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
                                    ARM64_ALWAYS_SYSTEM,
                                    kvm_update_va_mask)
                     : "+r" (v));
+#endif
        return v;
 }
 
index 9e3ecba3c4e67936a85ad11121052181f250aa51..a43f21559c3ee5aa2567187a00cbae6346882d6e 100644 (file)
 
 #define SYS_CNTFRQ_EL0                 sys_reg(3, 3, 14, 0, 0)
 
+#define SYS_CNTPCT_EL0                 sys_reg(3, 3, 14, 0, 1)
 #define SYS_CNTPCTSS_EL0               sys_reg(3, 3, 14, 0, 5)
 #define SYS_CNTVCTSS_EL0               sys_reg(3, 3, 14, 0, 6)
 
 
 #define SYS_AARCH32_CNTP_TVAL          sys_reg(0, 0, 14, 2, 0)
 #define SYS_AARCH32_CNTP_CTL           sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTPCT             sys_reg(0, 0, 0, 14, 0)
 #define SYS_AARCH32_CNTP_CVAL          sys_reg(0, 2, 0, 14, 0)
+#define SYS_AARCH32_CNTPCTSS           sys_reg(0, 8, 0, 14, 0)
 
 #define __PMEV_op2(n)                  ((n) & 0x7)
 #define __CNTR_CRm(n)                  (0x8 | (((n) >> 3) & 0x3))
index f8129c624b0709815cbd266d569bb12fb70e6047..f7ddd73a8c0fa2dabffd2782f674f3b484079875 100644 (file)
@@ -198,6 +198,15 @@ struct kvm_arm_copy_mte_tags {
        __u64 reserved[2];
 };
 
+/*
+ * Counter/Timer offset structure. Describe the virtual/physical offset.
+ * To be used with KVM_ARM_SET_COUNTER_OFFSET.
+ */
+struct kvm_arm_counter_offset {
+       __u64 counter_offset;
+       __u64 reserved;
+};
+
 #define KVM_ARM_TAGS_TO_GUEST          0
 #define KVM_ARM_TAGS_FROM_GUEST                1
 
@@ -372,6 +381,10 @@ enum {
 #endif
 };
 
+/* Device Control API on vm fd */
+#define KVM_ARM_VM_SMCCC_CTRL          0
+#define   KVM_ARM_VM_SMCCC_FILTER      0
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
@@ -411,6 +424,8 @@ enum {
 #define KVM_ARM_VCPU_TIMER_CTRL                1
 #define   KVM_ARM_VCPU_TIMER_IRQ_VTIMER                0
 #define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER                1
+#define   KVM_ARM_VCPU_TIMER_IRQ_HVTIMER       2
+#define   KVM_ARM_VCPU_TIMER_IRQ_HPTIMER       3
 #define KVM_ARM_VCPU_PVTIME_CTRL       2
 #define   KVM_ARM_VCPU_PVTIME_IPA      0
 
@@ -469,6 +484,27 @@ enum {
 /* run->fail_entry.hardware_entry_failure_reason codes. */
 #define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED    (1ULL << 0)
 
+enum kvm_smccc_filter_action {
+       KVM_SMCCC_FILTER_HANDLE = 0,
+       KVM_SMCCC_FILTER_DENY,
+       KVM_SMCCC_FILTER_FWD_TO_USER,
+
+#ifdef __KERNEL__
+       NR_SMCCC_FILTER_ACTIONS
+#endif
+};
+
+struct kvm_smccc_filter {
+       __u32 base;
+       __u32 nr_functions;
+       __u8 action;
+       __u8 pad[15];
+};
+
+/* arm64-specific KVM_EXIT_HYPERCALL flags */
+#define KVM_HYPERCALL_EXIT_SMC         (1U << 0)
+#define KVM_HYPERCALL_EXIT_16BIT       (1U << 1)
+
 #endif
 
 #endif /* __ARM_KVM_H__ */
index 2e3e5513977733b7a324c4263218be2bfbeef580..c331c49a7d19c82417db648f82252c449c8eadf5 100644 (file)
@@ -2223,6 +2223,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
        },
+       {
+               .desc = "Enhanced Counter Virtualization (CNTPOFF)",
+               .capability = ARM64_HAS_ECV_CNTPOFF,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR0_EL1,
+               .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
+               .field_width = 4,
+               .sign = FTR_UNSIGNED,
+               .min_field_value = ID_AA64MMFR0_EL1_ECV_CNTPOFF,
+       },
 #ifdef CONFIG_ARM64_PAN
        {
                .desc = "Privileged Access Never",
index 28d8a5dca5f129784b158589a3c4c1d231ffd6e9..d731b4655df8eb271c185f732c4ec5a725fb4905 100644 (file)
@@ -66,7 +66,7 @@
        .long   .Lefi_header_end - .L_head              // SizeOfHeaders
        .long   0                                       // CheckSum
        .short  IMAGE_SUBSYSTEM_EFI_APPLICATION         // Subsystem
-       .short  0                                       // DllCharacteristics
+       .short  IMAGE_DLL_CHARACTERISTICS_NX_COMPAT     // DllCharacteristics
        .quad   0                                       // SizeOfStackReserve
        .quad   0                                       // SizeOfStackCommit
        .quad   0                                       // SizeOfHeapReserve
index e1af4301b913d87024cc55f1d2aa4cbeb4d3f20c..05b022be885b6718f23c53e3be50c278befd6611 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/arch_timer.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
+#include <asm/kvm_nested.h>
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
@@ -30,14 +31,11 @@ static u32 host_ptimer_irq_flags;
 
 static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
 
-static const struct kvm_irq_level default_ptimer_irq = {
-       .irq    = 30,
-       .level  = 1,
-};
-
-static const struct kvm_irq_level default_vtimer_irq = {
-       .irq    = 27,
-       .level  = 1,
+static const u8 default_ppi[] = {
+       [TIMER_PTIMER]  = 30,
+       [TIMER_VTIMER]  = 27,
+       [TIMER_HPTIMER] = 26,
+       [TIMER_HVTIMER] = 28,
 };
 
 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
@@ -51,6 +49,24 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                              struct arch_timer_context *timer,
                              enum kvm_arch_timer_regs treg);
+static bool kvm_arch_timer_get_input_level(int vintid);
+
+static struct irq_ops arch_timer_irq_ops = {
+       .get_input_level = kvm_arch_timer_get_input_level,
+};
+
+static bool has_cntpoff(void)
+{
+       return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
+static int nr_timers(struct kvm_vcpu *vcpu)
+{
+       if (!vcpu_has_nv(vcpu))
+               return NR_KVM_EL0_TIMERS;
+
+       return NR_KVM_TIMERS;
+}
 
 u32 timer_get_ctl(struct arch_timer_context *ctxt)
 {
@@ -61,6 +77,10 @@ u32 timer_get_ctl(struct arch_timer_context *ctxt)
                return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
        case TIMER_PTIMER:
                return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+       case TIMER_HVTIMER:
+               return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2);
+       case TIMER_HPTIMER:
+               return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2);
        default:
                WARN_ON(1);
                return 0;
@@ -76,6 +96,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
                return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
        case TIMER_PTIMER:
                return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+       case TIMER_HVTIMER:
+               return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2);
+       case TIMER_HPTIMER:
+               return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
        default:
                WARN_ON(1);
                return 0;
@@ -84,10 +108,17 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
 
 static u64 timer_get_offset(struct arch_timer_context *ctxt)
 {
+       u64 offset = 0;
+
+       if (!ctxt)
+               return 0;
+
        if (ctxt->offset.vm_offset)
-               return *ctxt->offset.vm_offset;
+               offset += *ctxt->offset.vm_offset;
+       if (ctxt->offset.vcpu_offset)
+               offset += *ctxt->offset.vcpu_offset;
 
-       return 0;
+       return offset;
 }
 
 static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
@@ -101,6 +132,12 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
        case TIMER_PTIMER:
                __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
                break;
+       case TIMER_HVTIMER:
+               __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl;
+               break;
+       case TIMER_HPTIMER:
+               __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl;
+               break;
        default:
                WARN_ON(1);
        }
@@ -117,6 +154,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
        case TIMER_PTIMER:
                __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
                break;
+       case TIMER_HVTIMER:
+               __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval;
+               break;
+       case TIMER_HPTIMER:
+               __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval;
+               break;
        default:
                WARN_ON(1);
        }
@@ -139,13 +182,27 @@ u64 kvm_phys_timer_read(void)
 
 static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
 {
-       if (has_vhe()) {
+       if (vcpu_has_nv(vcpu)) {
+               if (is_hyp_ctxt(vcpu)) {
+                       map->direct_vtimer = vcpu_hvtimer(vcpu);
+                       map->direct_ptimer = vcpu_hptimer(vcpu);
+                       map->emul_vtimer = vcpu_vtimer(vcpu);
+                       map->emul_ptimer = vcpu_ptimer(vcpu);
+               } else {
+                       map->direct_vtimer = vcpu_vtimer(vcpu);
+                       map->direct_ptimer = vcpu_ptimer(vcpu);
+                       map->emul_vtimer = vcpu_hvtimer(vcpu);
+                       map->emul_ptimer = vcpu_hptimer(vcpu);
+               }
+       } else if (has_vhe()) {
                map->direct_vtimer = vcpu_vtimer(vcpu);
                map->direct_ptimer = vcpu_ptimer(vcpu);
+               map->emul_vtimer = NULL;
                map->emul_ptimer = NULL;
        } else {
                map->direct_vtimer = vcpu_vtimer(vcpu);
                map->direct_ptimer = NULL;
+               map->emul_vtimer = NULL;
                map->emul_ptimer = vcpu_ptimer(vcpu);
        }
 
@@ -212,7 +269,7 @@ static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         val - now,
                                         timecounter->mask,
-                                        &timecounter->frac);
+                                        &timer_ctx->ns_frac);
                return ns;
        }
 
@@ -240,8 +297,11 @@ static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
 
 static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
 {
-       struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
        u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+       struct arch_timer_context *ctx;
+
+       ctx = (vcpu_has_nv(vcpu) && is_hyp_ctxt(vcpu)) ? vcpu_hvtimer(vcpu)
+                                                      : vcpu_vtimer(vcpu);
 
        return kvm_counter_compute_delta(ctx, val);
 }
@@ -255,7 +315,7 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
        u64 min_delta = ULLONG_MAX;
        int i;
 
-       for (i = 0; i < NR_KVM_TIMERS; i++) {
+       for (i = 0; i < nr_timers(vcpu); i++) {
                struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
 
                WARN(ctx->loaded, "timer %d loaded\n", i);
@@ -338,9 +398,11 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
 
                switch (index) {
                case TIMER_VTIMER:
+               case TIMER_HVTIMER:
                        cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
                        break;
                case TIMER_PTIMER:
+               case TIMER_HPTIMER:
                        cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
                        break;
                case NR_KVM_TIMERS:
@@ -392,12 +454,12 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
        int ret;
 
        timer_ctx->irq.level = new_level;
-       trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
+       trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
                                   timer_ctx->irq.level);
 
        if (!userspace_irqchip(vcpu->kvm)) {
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-                                         timer_ctx->irq.irq,
+                                         timer_irq(timer_ctx),
                                          timer_ctx->irq.level,
                                          timer_ctx);
                WARN_ON(ret);
@@ -432,6 +494,12 @@ static void set_cntvoff(u64 cntvoff)
        kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
 }
 
+static void set_cntpoff(u64 cntpoff)
+{
+       if (has_cntpoff())
+               write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
+}
+
 static void timer_save_state(struct arch_timer_context *ctx)
 {
        struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
@@ -447,7 +515,10 @@ static void timer_save_state(struct arch_timer_context *ctx)
                goto out;
 
        switch (index) {
+               u64 cval;
+
        case TIMER_VTIMER:
+       case TIMER_HVTIMER:
                timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
                timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
 
@@ -473,13 +544,20 @@ static void timer_save_state(struct arch_timer_context *ctx)
                set_cntvoff(0);
                break;
        case TIMER_PTIMER:
+       case TIMER_HPTIMER:
                timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
-               timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));
+               cval = read_sysreg_el0(SYS_CNTP_CVAL);
+
+               if (!has_cntpoff())
+                       cval -= timer_get_offset(ctx);
+
+               timer_set_cval(ctx, cval);
 
                /* Disable the timer */
                write_sysreg_el0(0, SYS_CNTP_CTL);
                isb();
 
+               set_cntpoff(0);
                break;
        case NR_KVM_TIMERS:
                BUG();
@@ -510,6 +588,7 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
         */
        if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
            !kvm_timer_irq_can_fire(map.direct_ptimer) &&
+           !kvm_timer_irq_can_fire(map.emul_vtimer) &&
            !kvm_timer_irq_can_fire(map.emul_ptimer) &&
            !vcpu_has_wfit_active(vcpu))
                return;
@@ -543,14 +622,23 @@ static void timer_restore_state(struct arch_timer_context *ctx)
                goto out;
 
        switch (index) {
+               u64 cval, offset;
+
        case TIMER_VTIMER:
+       case TIMER_HVTIMER:
                set_cntvoff(timer_get_offset(ctx));
                write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
                isb();
                write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
                break;
        case TIMER_PTIMER:
-               write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
+       case TIMER_HPTIMER:
+               cval = timer_get_cval(ctx);
+               offset = timer_get_offset(ctx);
+               set_cntpoff(offset);
+               if (!has_cntpoff())
+                       cval += offset;
+               write_sysreg_el0(cval, SYS_CNTP_CVAL);
                isb();
                write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
                break;
@@ -586,7 +674,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
        kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
 
        if (irqchip_in_kernel(vcpu->kvm))
-               phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
+               phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx));
 
        phys_active |= ctx->irq.level;
 
@@ -621,6 +709,128 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
                enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
+/* If _pred is true, set bit in _set, otherwise set it in _clr */
+#define assign_clear_set_bit(_pred, _bit, _clr, _set)                  \
+       do {                                                            \
+               if (_pred)                                              \
+                       (_set) |= (_bit);                               \
+               else                                                    \
+                       (_clr) |= (_bit);                               \
+       } while (0)
+
+static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
+                                             struct timer_map *map)
+{
+       int hw, ret;
+
+       if (!irqchip_in_kernel(vcpu->kvm))
+               return;
+
+       /*
+        * We only ever unmap the vtimer irq on a VHE system that runs nested
+        * virtualization, in which case we have both a valid emul_vtimer,
+        * emul_ptimer, direct_vtimer, and direct_ptimer.
+        *
+        * Since this is called from kvm_timer_vcpu_load(), a change between
+        * vEL2 and vEL1/0 will have just happened, and the timer_map will
+        * represent this, and therefore we switch the emul/direct mappings
+        * below.
+        */
+       hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer));
+       if (hw < 0) {
+               kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer));
+               kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer));
+
+               ret = kvm_vgic_map_phys_irq(vcpu,
+                                           map->direct_vtimer->host_timer_irq,
+                                           timer_irq(map->direct_vtimer),
+                                           &arch_timer_irq_ops);
+               WARN_ON_ONCE(ret);
+               ret = kvm_vgic_map_phys_irq(vcpu,
+                                           map->direct_ptimer->host_timer_irq,
+                                           timer_irq(map->direct_ptimer),
+                                           &arch_timer_irq_ops);
+               WARN_ON_ONCE(ret);
+
+               /*
+                * The virtual offset behaviour is "interesting", as it
+                * always applies when HCR_EL2.E2H==0, but only when
+                * accessed from EL1 when HCR_EL2.E2H==1. So make sure we
+                * track E2H when putting the HV timer in "direct" mode.
+                */
+               if (map->direct_vtimer == vcpu_hvtimer(vcpu)) {
+                       struct arch_timer_offset *offs = &map->direct_vtimer->offset;
+
+                       if (vcpu_el2_e2h_is_set(vcpu))
+                               offs->vcpu_offset = NULL;
+                       else
+                               offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+               }
+       }
+}
+
+static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
+{
+       bool tpt, tpc;
+       u64 clr, set;
+
+       /*
+        * No trapping gets configured here with nVHE. See
+        * __timer_enable_traps(), which is where the stuff happens.
+        */
+       if (!has_vhe())
+               return;
+
+       /*
+        * Our default policy is not to trap anything. As we progress
+        * within this function, reality kicks in and we start adding
+        * traps based on emulation requirements.
+        */
+       tpt = tpc = false;
+
+       /*
+        * We have two possibilities to deal with a physical offset:
+        *
+        * - Either we have CNTPOFF (yay!) or the offset is 0:
+        *   we let the guest freely access the HW
+        *
+        * - or neither of these conditions applies:
+        *   we trap accesses to the HW, but still use it
+        *   after correcting the physical offset
+        */
+       if (!has_cntpoff() && timer_get_offset(map->direct_ptimer))
+               tpt = tpc = true;
+
+       /*
+        * Apply the enable bits that the guest hypervisor has requested for
+        * its own guest. We can only add traps that wouldn't have been set
+        * above.
+        */
+       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+               u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+
+               /* Use the VHE format for mental sanity */
+               if (!vcpu_el2_e2h_is_set(vcpu))
+                       val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;
+
+               tpt |= !(val & (CNTHCTL_EL1PCEN << 10));
+               tpc |= !(val & (CNTHCTL_EL1PCTEN << 10));
+       }
+
+       /*
+        * Now that we have collected our requirements, compute the
+        * trap and enable bits.
+        */
+       set = 0;
+       clr = 0;
+
+       assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
+       assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);
+
+       /* This only happens on VHE, so use the CNTKCTL_EL1 accessor */
+       sysreg_clear_set(cntkctl_el1, clr, set);
+}
+
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
@@ -632,6 +842,9 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
        get_timer_map(vcpu, &map);
 
        if (static_branch_likely(&has_gic_active_state)) {
+               if (vcpu_has_nv(vcpu))
+                       kvm_timer_vcpu_load_nested_switch(vcpu, &map);
+
                kvm_timer_vcpu_load_gic(map.direct_vtimer);
                if (map.direct_ptimer)
                        kvm_timer_vcpu_load_gic(map.direct_ptimer);
@@ -644,9 +857,12 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
        timer_restore_state(map.direct_vtimer);
        if (map.direct_ptimer)
                timer_restore_state(map.direct_ptimer);
-
+       if (map.emul_vtimer)
+               timer_emulate(map.emul_vtimer);
        if (map.emul_ptimer)
                timer_emulate(map.emul_ptimer);
+
+       timer_set_traps(vcpu, &map);
 }
 
 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
@@ -689,6 +905,8 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
         * In any case, we re-schedule the hrtimer for the physical timer when
         * coming back to the VCPU thread in kvm_timer_vcpu_load().
         */
+       if (map.emul_vtimer)
+               soft_timer_cancel(&map.emul_vtimer->hrtimer);
        if (map.emul_ptimer)
                soft_timer_cancel(&map.emul_ptimer->hrtimer);
 
@@ -738,56 +956,89 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
-       timer_set_ctl(vcpu_vtimer(vcpu), 0);
-       timer_set_ctl(vcpu_ptimer(vcpu), 0);
+       for (int i = 0; i < nr_timers(vcpu); i++)
+               timer_set_ctl(vcpu_get_timer(vcpu, i), 0);
+
+       /*
+        * A vcpu running at EL2 is in charge of the offset applied to
+        * the virtual timer, so use the physical VM offset, and point
+        * the vcpu offset to CNTVOFF_EL2.
+        */
+       if (vcpu_has_nv(vcpu)) {
+               struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
+
+               offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+               offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
+       }
 
        if (timer->enabled) {
-               kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
-               kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
+               for (int i = 0; i < nr_timers(vcpu); i++)
+                       kvm_timer_update_irq(vcpu, false,
+                                            vcpu_get_timer(vcpu, i));
 
                if (irqchip_in_kernel(vcpu->kvm)) {
-                       kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
+                       kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
                        if (map.direct_ptimer)
-                               kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
+                               kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
                }
        }
 
+       if (map.emul_vtimer)
+               soft_timer_cancel(&map.emul_vtimer->hrtimer);
        if (map.emul_ptimer)
                soft_timer_cancel(&map.emul_ptimer->hrtimer);
 
        return 0;
 }
 
+static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
+{
+       struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid);
+       struct kvm *kvm = vcpu->kvm;
+
+       ctxt->vcpu = vcpu;
+
+       if (timerid == TIMER_VTIMER)
+               ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
+       else
+               ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;
+
+       hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+       ctxt->hrtimer.function = kvm_hrtimer_expire;
+
+       switch (timerid) {
+       case TIMER_PTIMER:
+       case TIMER_HPTIMER:
+               ctxt->host_timer_irq = host_ptimer_irq;
+               break;
+       case TIMER_VTIMER:
+       case TIMER_HVTIMER:
+               ctxt->host_timer_irq = host_vtimer_irq;
+               break;
+       }
+}
+
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
-       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-       struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
-       vtimer->vcpu = vcpu;
-       vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
-       ptimer->vcpu = vcpu;
+       for (int i = 0; i < NR_KVM_TIMERS; i++)
+               timer_context_init(vcpu, i);
 
-       /* Synchronize cntvoff across all vtimers of a VM. */
-       timer_set_offset(vtimer, kvm_phys_timer_read());
-       timer_set_offset(ptimer, 0);
+       /* Synchronize offsets across timers of a VM if not already provided */
+       if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
+               timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
+               timer_set_offset(vcpu_ptimer(vcpu), 0);
+       }
 
        hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
        timer->bg_timer.function = kvm_bg_timer_expire;
+}
 
-       hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
-       hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
-       vtimer->hrtimer.function = kvm_hrtimer_expire;
-       ptimer->hrtimer.function = kvm_hrtimer_expire;
-
-       vtimer->irq.irq = default_vtimer_irq.irq;
-       ptimer->irq.irq = default_ptimer_irq.irq;
-
-       vtimer->host_timer_irq = host_vtimer_irq;
-       ptimer->host_timer_irq = host_ptimer_irq;
-
-       vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
-       ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
+void kvm_timer_init_vm(struct kvm *kvm)
+{
+       for (int i = 0; i < NR_KVM_TIMERS; i++)
+               kvm->arch.timer_data.ppi[i] = default_ppi[i];
 }
 
 void kvm_timer_cpu_up(void)
@@ -814,8 +1065,11 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
        case KVM_REG_ARM_TIMER_CNT:
-               timer = vcpu_vtimer(vcpu);
-               timer_set_offset(timer, kvm_phys_timer_read() - value);
+               if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
+                             &vcpu->kvm->arch.flags)) {
+                       timer = vcpu_vtimer(vcpu);
+                       timer_set_offset(timer, kvm_phys_timer_read() - value);
+               }
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer = vcpu_vtimer(vcpu);
@@ -825,6 +1079,13 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
+       case KVM_REG_ARM_PTIMER_CNT:
+               if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
+                             &vcpu->kvm->arch.flags)) {
+                       timer = vcpu_ptimer(vcpu);
+                       timer_set_offset(timer, kvm_phys_timer_read() - value);
+               }
+               break;
        case KVM_REG_ARM_PTIMER_CVAL:
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
@@ -902,6 +1163,10 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                val = kvm_phys_timer_read() - timer_get_offset(timer);
                break;
 
+       case TIMER_REG_VOFF:
+               val = *timer->offset.vcpu_offset;
+               break;
+
        default:
                BUG();
        }
@@ -920,7 +1185,7 @@ u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
        get_timer_map(vcpu, &map);
        timer = vcpu_get_timer(vcpu, tmr);
 
-       if (timer == map.emul_ptimer)
+       if (timer == map.emul_vtimer || timer == map.emul_ptimer)
                return kvm_arm_timer_read(vcpu, timer, treg);
 
        preempt_disable();
@@ -952,6 +1217,10 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
                timer_set_cval(timer, val);
                break;
 
+       case TIMER_REG_VOFF:
+               *timer->offset.vcpu_offset = val;
+               break;
+
        default:
                BUG();
        }
@@ -967,7 +1236,7 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
 
        get_timer_map(vcpu, &map);
        timer = vcpu_get_timer(vcpu, tmr);
-       if (timer == map.emul_ptimer) {
+       if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
                soft_timer_cancel(&timer->hrtimer);
                kvm_arm_timer_write(vcpu, timer, treg, val);
                timer_emulate(timer);
@@ -1047,10 +1316,6 @@ static const struct irq_domain_ops timer_domain_ops = {
        .free   = timer_irq_domain_free,
 };
 
-static struct irq_ops arch_timer_irq_ops = {
-       .get_input_level = kvm_arch_timer_get_input_level,
-};
-
 static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
 {
        *flags = irq_get_trigger_type(virq);
@@ -1192,44 +1457,56 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 
 static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
 {
-       int vtimer_irq, ptimer_irq, ret;
-       unsigned long i;
+       u32 ppis = 0;
+       bool valid;
 
-       vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
-       ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
-       if (ret)
-               return false;
+       mutex_lock(&vcpu->kvm->arch.config_lock);
 
-       ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
-       ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
-       if (ret)
-               return false;
+       for (int i = 0; i < nr_timers(vcpu); i++) {
+               struct arch_timer_context *ctx;
+               int irq;
+
+               ctx = vcpu_get_timer(vcpu, i);
+               irq = timer_irq(ctx);
+               if (kvm_vgic_set_owner(vcpu, irq, ctx))
+                       break;
 
-       kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
-               if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
-                   vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
-                       return false;
+               /*
+                * We know by construction that we only have PPIs, so
+                * all values are less than 32.
+                */
+               ppis |= BIT(irq);
        }
 
-       return true;
+       valid = hweight32(ppis) == nr_timers(vcpu);
+
+       if (valid)
+               set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags);
+
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
+
+       return valid;
 }
 
-bool kvm_arch_timer_get_input_level(int vintid)
+static bool kvm_arch_timer_get_input_level(int vintid)
 {
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-       struct arch_timer_context *timer;
 
        if (WARN(!vcpu, "No vcpu context!\n"))
                return false;
 
-       if (vintid == vcpu_vtimer(vcpu)->irq.irq)
-               timer = vcpu_vtimer(vcpu);
-       else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
-               timer = vcpu_ptimer(vcpu);
-       else
-               BUG();
+       for (int i = 0; i < nr_timers(vcpu); i++) {
+               struct arch_timer_context *ctx;
+
+               ctx = vcpu_get_timer(vcpu, i);
+               if (timer_irq(ctx) == vintid)
+                       return kvm_timer_should_fire(ctx);
+       }
+
+       /* A timer IRQ has fired, but no matching timer was found? */
+       WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid);
 
-       return kvm_timer_should_fire(timer);
+       return false;
 }
 
 int kvm_timer_enable(struct kvm_vcpu *vcpu)
@@ -1258,7 +1535,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 
        ret = kvm_vgic_map_phys_irq(vcpu,
                                    map.direct_vtimer->host_timer_irq,
-                                   map.direct_vtimer->irq.irq,
+                                   timer_irq(map.direct_vtimer),
                                    &arch_timer_irq_ops);
        if (ret)
                return ret;
@@ -1266,7 +1543,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
        if (map.direct_ptimer) {
                ret = kvm_vgic_map_phys_irq(vcpu,
                                            map.direct_ptimer->host_timer_irq,
-                                           map.direct_ptimer->irq.irq,
+                                           timer_irq(map.direct_ptimer),
                                            &arch_timer_irq_ops);
        }
 
@@ -1278,45 +1555,17 @@ no_vgic:
        return 0;
 }
 
-/*
- * On VHE system, we only need to configure the EL2 timer trap register once,
- * not for every world switch.
- * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
- * and this makes those bits have no effect for the host kernel execution.
- */
+/* If we have CNTPOFF, permanently set ECV to enable it */
 void kvm_timer_init_vhe(void)
 {
-       /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
-       u32 cnthctl_shift = 10;
-       u64 val;
-
-       /*
-        * VHE systems allow the guest direct access to the EL1 physical
-        * timer/counter.
-        */
-       val = read_sysreg(cnthctl_el2);
-       val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
-       val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
-       write_sysreg(val, cnthctl_el2);
-}
-
-static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
-{
-       struct kvm_vcpu *vcpu;
-       unsigned long i;
-
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
-               vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
-       }
+       if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
+               sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV);
 }
 
 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
        int __user *uaddr = (int __user *)(long)attr->addr;
-       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-       struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-       int irq;
+       int irq, idx, ret = 0;
 
        if (!irqchip_in_kernel(vcpu->kvm))
                return -EINVAL;
@@ -1327,21 +1576,42 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
        if (!(irq_is_ppi(irq)))
                return -EINVAL;
 
-       if (vcpu->arch.timer_cpu.enabled)
-               return -EBUSY;
+       mutex_lock(&vcpu->kvm->arch.config_lock);
+
+       if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
+                    &vcpu->kvm->arch.flags)) {
+               ret = -EBUSY;
+               goto out;
+       }
 
        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
-               set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
+               idx = TIMER_VTIMER;
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
-               set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
+               idx = TIMER_PTIMER;
+               break;
+       case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
+               idx = TIMER_HVTIMER;
+               break;
+       case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
+               idx = TIMER_HPTIMER;
                break;
        default:
-               return -ENXIO;
+               ret = -ENXIO;
+               goto out;
        }
 
-       return 0;
+       /*
+        * We cannot validate the IRQ uniqueness before we run, so take it at
+        * face value. The verdict will be given on first vcpu run, for each
+        * vcpu. Yes this is late. Blame it on the stupid API.
+        */
+       vcpu->kvm->arch.timer_data.ppi[idx] = irq;
+
+out:
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
+       return ret;
 }
 
 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
@@ -1357,11 +1627,17 @@ int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                timer = vcpu_ptimer(vcpu);
                break;
+       case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
+               timer = vcpu_hvtimer(vcpu);
+               break;
+       case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
+               timer = vcpu_hptimer(vcpu);
+               break;
        default:
                return -ENXIO;
        }
 
-       irq = timer->irq.irq;
+       irq = timer_irq(timer);
        return put_user(irq, uaddr);
 }
 
@@ -1370,8 +1646,42 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
+       case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
+       case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
                return 0;
        }
 
        return -ENXIO;
 }
+
+int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
+                                   struct kvm_arm_counter_offset *offset)
+{
+       int ret = 0;
+
+       if (offset->reserved)
+               return -EINVAL;
+
+       mutex_lock(&kvm->lock);
+
+       if (lock_all_vcpus(kvm)) {
+               set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
+
+               /*
+                * If userspace decides to set the offset using this
+                * API rather than merely restoring the counter
+                * values, the offset applies to both the virtual and
+                * physical views.
+                */
+               kvm->arch.timer_data.voffset = offset->counter_offset;
+               kvm->arch.timer_data.poffset = offset->counter_offset;
+
+               unlock_all_vcpus(kvm);
+       } else {
+               ret = -EBUSY;
+       }
+
+       mutex_unlock(&kvm->lock);
+
+       return ret;
+}
index a43e1cb3b7e97267bc22caf05c68b166acc2551d..95b715cdf6f340892a61d86dc9f1cfd9b1f47a40 100644 (file)
@@ -128,6 +128,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
        int ret;
 
+       mutex_init(&kvm->arch.config_lock);
+
+#ifdef CONFIG_LOCKDEP
+       /* Clue in lockdep that the config_lock must be taken inside kvm->lock */
+       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
+       mutex_unlock(&kvm->arch.config_lock);
+       mutex_unlock(&kvm->lock);
+#endif
+
        ret = kvm_share_hyp(kvm, kvm + 1);
        if (ret)
                return ret;
@@ -148,6 +158,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_vgic_early_init(kvm);
 
+       kvm_timer_init_vm(kvm);
+
        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->max_vcpus = kvm_arm_default_max_vcpus();
 
@@ -192,6 +204,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kvm_destroy_vcpus(kvm);
 
        kvm_unshare_hyp(kvm, kvm + 1);
+
+       kvm_arm_teardown_hypercalls(kvm);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -220,6 +234,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_VCPU_ATTRIBUTES:
        case KVM_CAP_PTP_KVM:
        case KVM_CAP_ARM_SYSTEM_SUSPEND:
+       case KVM_CAP_COUNTER_OFFSET:
                r = 1;
                break;
        case KVM_CAP_SET_GUEST_DEBUG2:
@@ -326,6 +341,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
        int err;
 
+       spin_lock_init(&vcpu->arch.mp_state_lock);
+
+#ifdef CONFIG_LOCKDEP
+       /* Inform lockdep that the config_lock is acquired after vcpu->mutex */
+       mutex_lock(&vcpu->mutex);
+       mutex_lock(&vcpu->kvm->arch.config_lock);
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
+       mutex_unlock(&vcpu->mutex);
+#endif
+
        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;
        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
@@ -443,34 +468,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        vcpu->cpu = -1;
 }
 
-void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+       WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
 }
 
+void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+       spin_lock(&vcpu->arch.mp_state_lock);
+       __kvm_arm_vcpu_power_off(vcpu);
+       spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
+       return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
 }
 
 static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
+       WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
        kvm_make_request(KVM_REQ_SUSPEND, vcpu);
        kvm_vcpu_kick(vcpu);
 }
 
 static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
+       return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
-       *mp_state = vcpu->arch.mp_state;
+       *mp_state = READ_ONCE(vcpu->arch.mp_state);
 
        return 0;
 }
@@ -480,12 +512,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
        int ret = 0;
 
+       spin_lock(&vcpu->arch.mp_state_lock);
+
        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
-               vcpu->arch.mp_state = *mp_state;
+               WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
                break;
        case KVM_MP_STATE_STOPPED:
-               kvm_arm_vcpu_power_off(vcpu);
+               __kvm_arm_vcpu_power_off(vcpu);
                break;
        case KVM_MP_STATE_SUSPENDED:
                kvm_arm_vcpu_suspend(vcpu);
@@ -494,6 +528,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                ret = -EINVAL;
        }
 
+       spin_unlock(&vcpu->arch.mp_state_lock);
+
        return ret;
 }
 
@@ -593,9 +629,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
        if (kvm_vm_is_protected(kvm))
                kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
 
        return ret;
 }
@@ -1210,10 +1246,14 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
        /*
         * Handle the "start in power-off" case.
         */
+       spin_lock(&vcpu->arch.mp_state_lock);
+
        if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
-               kvm_arm_vcpu_power_off(vcpu);
+               __kvm_arm_vcpu_power_off(vcpu);
        else
-               vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+               WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+
+       spin_unlock(&vcpu->arch.mp_state_lock);
 
        return 0;
 }
@@ -1439,10 +1479,31 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
        }
 }
 
+static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       switch (attr->group) {
+       case KVM_ARM_VM_SMCCC_CTRL:
+               return kvm_vm_smccc_has_attr(kvm, attr);
+       default:
+               return -ENXIO;
+       }
+}
+
+static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       switch (attr->group) {
+       case KVM_ARM_VM_SMCCC_CTRL:
+               return kvm_vm_smccc_set_attr(kvm, attr);
+       default:
+               return -ENXIO;
+       }
+}
+
 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 {
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
+       struct kvm_device_attr attr;
 
        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
@@ -1478,11 +1539,73 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
                        return -EFAULT;
                return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
        }
+       case KVM_ARM_SET_COUNTER_OFFSET: {
+               struct kvm_arm_counter_offset offset;
+
+               if (copy_from_user(&offset, argp, sizeof(offset)))
+                       return -EFAULT;
+               return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
+       }
+       case KVM_HAS_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+
+               return kvm_vm_has_attr(kvm, &attr);
+       }
+       case KVM_SET_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+
+               return kvm_vm_set_attr(kvm, &attr);
+       }
        default:
                return -EINVAL;
        }
 }
 
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+       struct kvm_vcpu *tmp_vcpu;
+
+       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+               tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+               mutex_unlock(&tmp_vcpu->mutex);
+       }
+}
+
+void unlock_all_vcpus(struct kvm *kvm)
+{
+       lockdep_assert_held(&kvm->lock);
+
+       unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+bool lock_all_vcpus(struct kvm *kvm)
+{
+       struct kvm_vcpu *tmp_vcpu;
+       unsigned long c;
+
+       lockdep_assert_held(&kvm->lock);
+
+       /*
+        * Any time a vcpu is in an ioctl (including running), the
+        * core KVM code tries to grab the vcpu->mutex.
+        *
+        * By grabbing the vcpu->mutex of all VCPUs we ensure that no
+        * other VCPUs can fiddle with the state while we access it.
+        */
+       kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+               if (!mutex_trylock(&tmp_vcpu->mutex)) {
+                       unlock_vcpus(kvm, c - 1);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 static unsigned long nvhe_percpu_size(void)
 {
        return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
index 26a2ebc465ea72ee8ee20e52ccd448f778a2cc19..20280a5233f679cea9ddf7c68b94b30b5ae83b47 100644 (file)
@@ -590,11 +590,16 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
        return copy_core_reg_indices(vcpu, NULL);
 }
 
-/**
- * ARM64 versions of the TIMER registers, always available on arm64
- */
+static const u64 timer_reg_list[] = {
+       KVM_REG_ARM_TIMER_CTL,
+       KVM_REG_ARM_TIMER_CNT,
+       KVM_REG_ARM_TIMER_CVAL,
+       KVM_REG_ARM_PTIMER_CTL,
+       KVM_REG_ARM_PTIMER_CNT,
+       KVM_REG_ARM_PTIMER_CVAL,
+};
 
-#define NUM_TIMER_REGS 3
+#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list)
 
 static bool is_timer_reg(u64 index)
 {
@@ -602,6 +607,9 @@ static bool is_timer_reg(u64 index)
        case KVM_REG_ARM_TIMER_CTL:
        case KVM_REG_ARM_TIMER_CNT:
        case KVM_REG_ARM_TIMER_CVAL:
+       case KVM_REG_ARM_PTIMER_CTL:
+       case KVM_REG_ARM_PTIMER_CNT:
+       case KVM_REG_ARM_PTIMER_CVAL:
                return true;
        }
        return false;
@@ -609,14 +617,11 @@ static bool is_timer_reg(u64 index)
 
 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-       if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
-               return -EFAULT;
-       uindices++;
-       if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
-               return -EFAULT;
-       uindices++;
-       if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
-               return -EFAULT;
+       for (int i = 0; i < NUM_TIMER_REGS; i++) {
+               if (put_user(timer_reg_list[i], uindices))
+                       return -EFAULT;
+               uindices++;
+       }
 
        return 0;
 }
@@ -957,7 +962,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 
        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
+               mutex_lock(&vcpu->kvm->arch.config_lock);
                ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+               mutex_unlock(&vcpu->kvm->arch.config_lock);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_set_attr(vcpu, attr);
index a798c0b4d7177020ee9ed28ecc2ee345e565ee2e..6dcd6604b6bc16c2230625f7361a5147f3ca6714 100644 (file)
@@ -36,8 +36,6 @@ static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
 
 static int handle_hvc(struct kvm_vcpu *vcpu)
 {
-       int ret;
-
        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));
        vcpu->stat.hvc_exit_stat++;
@@ -52,33 +50,29 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       ret = kvm_hvc_call_handler(vcpu);
-       if (ret < 0) {
-               vcpu_set_reg(vcpu, 0, ~0UL);
-               return 1;
-       }
-
-       return ret;
+       return kvm_smccc_call_handler(vcpu);
 }
 
 static int handle_smc(struct kvm_vcpu *vcpu)
 {
-       int ret;
-
        /*
         * "If an SMC instruction executed at Non-secure EL1 is
         * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
         * Trap exception, not a Secure Monitor Call exception [...]"
         *
         * We need to advance the PC after the trap, as it would
-        * otherwise return to the same address...
-        *
-        * Only handle SMCs from the virtual EL2 with an immediate of zero and
-        * skip it otherwise.
+        * otherwise return to the same address. Furthermore, pre-incrementing
+        * the PC before potentially exiting to userspace maintains the same
+        * abstraction for both SMCs and HVCs.
+        */
+       kvm_incr_pc(vcpu);
+
+       /*
+        * SMCs with a nonzero immediate are reserved according to DEN0028E 2.9
+        * "SMC and HVC immediate value".
         */
-       if (!vcpu_is_el2(vcpu) || kvm_vcpu_hvc_get_imm(vcpu)) {
+       if (kvm_vcpu_hvc_get_imm(vcpu)) {
                vcpu_set_reg(vcpu, 0, ~0UL);
-               kvm_incr_pc(vcpu);
                return 1;
        }
 
@@ -89,13 +83,7 @@ static int handle_smc(struct kvm_vcpu *vcpu)
         * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than
         * being treated as UNDEFINED.
         */
-       ret = kvm_hvc_call_handler(vcpu);
-       if (ret < 0)
-               vcpu_set_reg(vcpu, 0, ~0UL);
-
-       kvm_incr_pc(vcpu);
-
-       return ret;
+       return kvm_smccc_call_handler(vcpu);
 }
 
 /*
index 07d37ff88a3f2a69352ae123a08cf37ebfcb273e..c41166f1a1dd7dd6c228dd7e0477424dd19be9ba 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
@@ -326,6 +327,55 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
        return true;
 }
 
+static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
+{
+       struct arch_timer_context *ctxt;
+       u32 sysreg;
+       u64 val;
+
+       /*
+        * We only get here for 64bit guests, 32bit guests will hit
+        * the long and winding road all the way to the standard
+        * handling. Yes, it sucks to be irrelevant.
+        */
+       sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+
+       switch (sysreg) {
+       case SYS_CNTPCT_EL0:
+       case SYS_CNTPCTSS_EL0:
+               if (vcpu_has_nv(vcpu)) {
+                       if (is_hyp_ctxt(vcpu)) {
+                               ctxt = vcpu_hptimer(vcpu);
+                               break;
+                       }
+
+                       /* Check for guest hypervisor trapping */
+                       val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+                       if (!vcpu_el2_e2h_is_set(vcpu))
+                               val = (val & CNTHCTL_EL1PCTEN) << 10;
+
+                       if (!(val & (CNTHCTL_EL1PCTEN << 10)))
+                               return false;
+               }
+
+               ctxt = vcpu_ptimer(vcpu);
+               break;
+       default:
+               return false;
+       }
+
+       val = arch_timer_read_cntpct_el0();
+
+       if (ctxt->offset.vm_offset)
+               val -= *kern_hyp_va(ctxt->offset.vm_offset);
+       if (ctxt->offset.vcpu_offset)
+               val -= *kern_hyp_va(ctxt->offset.vcpu_offset);
+
+       vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
+       __kvm_skip_instr(vcpu);
+       return true;
+}
+
 static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
@@ -339,6 +389,9 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
                return kvm_hyp_handle_ptrauth(vcpu, exit_code);
 
+       if (kvm_hyp_handle_cntpct(vcpu))
+               return true;
+
        return false;
 }
 
index 2673bde62faddf973b14c5a34bf47c57b9667338..d756b939f296913000551d8d0645a5c68d1b828d 100644 (file)
@@ -37,7 +37,6 @@ static void __debug_save_spe(u64 *pmscr_el1)
 
        /* Now drain all buffered data to memory */
        psb_csync();
-       dsb(nsh);
 }
 
 static void __debug_restore_spe(u64 pmscr_el1)
@@ -69,7 +68,6 @@ static void __debug_save_trace(u64 *trfcr_el1)
        isb();
        /* Drain the trace buffer to memory */
        tsb_csync();
-       dsb(nsh);
 }
 
 static void __debug_restore_trace(u64 trfcr_el1)
index 552653fa18be34b2cde379d04ad6acb27395e3c8..2e9ec4a2a4a323d238e29f8ac199da8ea5301768 100644 (file)
@@ -297,6 +297,13 @@ int __pkvm_prot_finalize(void)
        params->vttbr = kvm_get_vttbr(mmu);
        params->vtcr = host_mmu.arch.vtcr;
        params->hcr_el2 |= HCR_VM;
+
+       /*
+        * The CMO below not only cleans the updated params to the
+        * PoC, but also provides the DSB that ensures ongoing
+        * page-table walks that have started before we trapped to EL2
+        * have completed.
+        */
        kvm_flush_dcache_to_poc(params, sizeof(*params));
 
        write_sysreg(params->hcr_el2, hcr_el2);
index c2cb46ca4fb667e9885601e40a91721e896947e7..71fa16a0dc775890adbcebe3a2ade963734ef445 100644 (file)
@@ -272,6 +272,17 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
         */
        __debug_save_host_buffers_nvhe(vcpu);
 
+       /*
+        * We're about to restore some new MMU state. Make sure
+        * ongoing page-table walks that have started before we
+        * trapped to EL2 have completed. This also synchronises the
+        * above disabling of SPE and TRBE.
+        *
+        * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
+        * rule R_LFHQG and subsequent information statements.
+        */
+       dsb(nsh);
+
        __kvm_adjust_pc(vcpu);
 
        /*
@@ -306,6 +317,13 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
        __timer_disable_traps(vcpu);
        __hyp_vgic_save_state(vcpu);
 
+       /*
+        * Same thing as before the guest run: we're about to switch
+        * the MMU context, so let's make sure we don't have any
+        * ongoing EL1&0 translations.
+        */
+       dsb(nsh);
+
        __deactivate_traps(vcpu);
        __load_host_stage2();
 
index 9072e71693baba526ca16f2c737d4495154a7780..b185ac0dbd4707f612389c1dc6017f3bf0ec764d 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
 
 void __kvm_timer_set_cntvoff(u64 cntvoff)
 {
@@ -35,14 +36,19 @@ void __timer_disable_traps(struct kvm_vcpu *vcpu)
  */
 void __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-       u64 val;
+       u64 clr = 0, set = 0;
 
        /*
         * Disallow physical timer access for the guest
-        * Physical counter access is allowed
+        * Physical counter access is allowed if no offset is enforced
+        * or running protected (we don't offset anything in this case).
         */
-       val = read_sysreg(cnthctl_el2);
-       val &= ~CNTHCTL_EL1PCEN;
-       val |= CNTHCTL_EL1PCTEN;
-       write_sysreg(val, cnthctl_el2);
+       clr = CNTHCTL_EL1PCEN;
+       if (is_protected_kvm_enabled() ||
+           !kern_hyp_va(vcpu->kvm)->arch.timer_data.poffset)
+               set |= CNTHCTL_EL1PCTEN;
+       else
+               clr |= CNTHCTL_EL1PCTEN;
+
+       sysreg_clear_set(cnthctl_el2, clr, set);
 }
index d296d617f589633924fa11eddfb75ace470d703b..978179133f4b97665ae6468b9e69939ac52f86c6 100644 (file)
@@ -15,8 +15,31 @@ struct tlb_inv_context {
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-                                 struct tlb_inv_context *cxt)
+                                 struct tlb_inv_context *cxt,
+                                 bool nsh)
 {
+       /*
+        * We have two requirements:
+        *
+        * - ensure that the page table updates are visible to all
+        *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
+        *   being either ish or nsh, depending on the invalidation
+        *   type.
+        *
+        * - complete any speculative page table walk started before
+        *   we trapped to EL2 so that we can mess with the MM
+        *   registers out of context, for which dsb(nsh) is enough
+        *
+        * The composition of these two barriers is a dsb(DOMAIN), and
+        * the 'nsh' parameter tracks the distinction between
+        * Inner-Shareable and Non-Shareable, as specified by the
+        * callers.
+        */
+       if (nsh)
+               dsb(nsh);
+       else
+               dsb(ish);
+
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;
 
@@ -60,10 +83,8 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 {
        struct tlb_inv_context cxt;
 
-       dsb(ishst);
-
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       __tlb_switch_to_guest(mmu, &cxt, false);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -113,10 +134,8 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
        struct tlb_inv_context cxt;
 
-       dsb(ishst);
-
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       __tlb_switch_to_guest(mmu, &cxt, false);
 
        __tlbi(vmalls12e1is);
        dsb(ish);
@@ -130,7 +149,7 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest(mmu, &cxt);
+       __tlb_switch_to_guest(mmu, &cxt, false);
 
        __tlbi(vmalle1);
        asm volatile("ic iallu");
@@ -142,7 +161,8 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 
 void __kvm_flush_vm_context(void)
 {
-       dsb(ishst);
+       /* Same remark as in __tlb_switch_to_guest() */
+       dsb(ish);
        __tlbi(alle1is);
 
        /*
index cd3f3117bf164b8ea618482870349a53a48844c1..3d868e84c7a0a33246f86b4ccadaed8b5abec548 100644 (file)
@@ -227,11 +227,10 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
        /*
         * When we exit from the guest we change a number of CPU configuration
-        * parameters, such as traps.  Make sure these changes take effect
-        * before running the host or additional guests.
+        * parameters, such as traps.  We rely on the isb() in kvm_call_hyp*()
+        * to make sure these changes take effect before running the host or
+        * additional guests.
         */
-       isb();
-
        return ret;
 }
 
index 7b44f6b3b547d17c00a81c998a0794b7bd781283..b35a178e7e0db0571ff6aba5a4fcaa08fc70749f 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
+#include <asm/kvm_nested.h>
 
 /*
  * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
@@ -69,6 +70,17 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
        host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        __sysreg_save_user_state(host_ctxt);
 
+       /*
+        * When running a normal EL1 guest, we only load a new vcpu
+        * after a context switch, which involves a DSB, so all
+        * speculative EL1&0 walks will have already completed.
+        * If running NV, the vcpu may transition between vEL1 and
+        * vEL2 without a context switch, so make sure we complete
+        * those walks before loading a new context.
+        */
+       if (vcpu_has_nv(vcpu))
+               dsb(nsh);
+
        /*
         * Load guest EL1 and user state
         *
index 5da884e11337a6d420e3dc71456b469057300d1c..2e16fc7b31bfa7c99eaa1a106e8ae3387735926e 100644 (file)
@@ -47,7 +47,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
                cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
                break;
        case KVM_PTP_PHYS_COUNTER:
-               cycles = systime_snapshot.cycles;
+               cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
                break;
        default:
                return;
@@ -65,7 +65,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
        val[3] = lower_32_bits(cycles);
 }
 
-static bool kvm_hvc_call_default_allowed(u32 func_id)
+static bool kvm_smccc_default_allowed(u32 func_id)
 {
        switch (func_id) {
        /*
@@ -93,7 +93,7 @@ static bool kvm_hvc_call_default_allowed(u32 func_id)
        }
 }
 
-static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
+static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
 {
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
 
@@ -117,20 +117,161 @@ static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
                return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
                                &smccc_feat->vendor_hyp_bmap);
        default:
-               return kvm_hvc_call_default_allowed(func_id);
+               return false;
+       }
+}
+
+#define SMC32_ARCH_RANGE_BEGIN ARM_SMCCC_VERSION_FUNC_ID
+#define SMC32_ARCH_RANGE_END   ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,         \
+                                                  ARM_SMCCC_SMC_32,            \
+                                                  0, ARM_SMCCC_FUNC_MASK)
+
+#define SMC64_ARCH_RANGE_BEGIN ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,         \
+                                                  ARM_SMCCC_SMC_64,            \
+                                                  0, 0)
+#define SMC64_ARCH_RANGE_END   ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,         \
+                                                  ARM_SMCCC_SMC_64,            \
+                                                  0, ARM_SMCCC_FUNC_MASK)
+
+static void init_smccc_filter(struct kvm *kvm)
+{
+       int r;
+
+       mt_init(&kvm->arch.smccc_filter);
+
+       /*
+        * Prevent userspace from handling any SMCCC calls in the architecture
+        * range, avoiding the risk of misrepresenting Spectre mitigation status
+        * to the guest.
+        */
+       r = mtree_insert_range(&kvm->arch.smccc_filter,
+                              SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
+                              xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
+                              GFP_KERNEL_ACCOUNT);
+       WARN_ON_ONCE(r);
+
+       r = mtree_insert_range(&kvm->arch.smccc_filter,
+                              SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
+                              xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
+                              GFP_KERNEL_ACCOUNT);
+       WARN_ON_ONCE(r);
+
+}
+
+static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
+{
+       const void *zero_page = page_to_virt(ZERO_PAGE(0));
+       struct kvm_smccc_filter filter;
+       u32 start, end;
+       int r;
+
+       if (copy_from_user(&filter, uaddr, sizeof(filter)))
+               return -EFAULT;
+
+       if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
+               return -EINVAL;
+
+       start = filter.base;
+       end = start + filter.nr_functions - 1;
+
+       if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
+               return -EINVAL;
+
+       mutex_lock(&kvm->arch.config_lock);
+
+       if (kvm_vm_has_ran_once(kvm)) {
+               r = -EBUSY;
+               goto out_unlock;
        }
+
+       r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
+                              xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
+       if (r)
+               goto out_unlock;
+
+       set_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags);
+
+out_unlock:
+       mutex_unlock(&kvm->arch.config_lock);
+       return r;
+}
+
+static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
+{
+       unsigned long idx = func_id;
+       void *val;
+
+       if (!test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags))
+               return KVM_SMCCC_FILTER_HANDLE;
+
+       /*
+        * But where's the error handling, you say?
+        *
+        * mt_find() returns NULL if no entry was found, which just so happens
+        * to match KVM_SMCCC_FILTER_HANDLE.
+        */
+       val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
+       return xa_to_value(val);
 }
 
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
+{
+       /*
+        * Intervening actions in the SMCCC filter take precedence over the
+        * pseudo-firmware register bitmaps.
+        */
+       u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
+       if (action != KVM_SMCCC_FILTER_HANDLE)
+               return action;
+
+       if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
+           kvm_smccc_default_allowed(func_id))
+               return KVM_SMCCC_FILTER_HANDLE;
+
+       return KVM_SMCCC_FILTER_DENY;
+}
+
+static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
+{
+       u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
+       struct kvm_run *run = vcpu->run;
+       u64 flags = 0;
+
+       if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
+               flags |= KVM_HYPERCALL_EXIT_SMC;
+
+       if (!kvm_vcpu_trap_il_is32bit(vcpu))
+               flags |= KVM_HYPERCALL_EXIT_16BIT;
+
+       run->exit_reason = KVM_EXIT_HYPERCALL;
+       run->hypercall = (typeof(run->hypercall)) {
+               .nr     = func_id,
+               .flags  = flags,
+       };
+}
+
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
 {
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
        u32 func_id = smccc_get_function(vcpu);
        u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
        u32 feature;
+       u8 action;
        gpa_t gpa;
 
-       if (!kvm_hvc_call_allowed(vcpu, func_id))
+       action = kvm_smccc_get_action(vcpu, func_id);
+       switch (action) {
+       case KVM_SMCCC_FILTER_HANDLE:
+               break;
+       case KVM_SMCCC_FILTER_DENY:
+               goto out;
+       case KVM_SMCCC_FILTER_FWD_TO_USER:
+               kvm_prepare_hypercall_exit(vcpu, func_id);
+               return 0;
+       default:
+               WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
                goto out;
+       }
 
        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
@@ -245,6 +386,13 @@ void kvm_arm_init_hypercalls(struct kvm *kvm)
        smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
        smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
        smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
+
+       init_smccc_filter(kvm);
+}
+
+void kvm_arm_teardown_hypercalls(struct kvm *kvm)
+{
+       mtree_destroy(&kvm->arch.smccc_filter);
 }
 
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
@@ -377,17 +525,16 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
        if (val & ~fw_reg_features)
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
 
-       if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
-           val != *fw_reg_bmap) {
+       if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
                ret = -EBUSY;
                goto out;
        }
 
        WRITE_ONCE(*fw_reg_bmap, val);
 out:
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
        return ret;
 }
 
@@ -479,3 +626,25 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
        return -EINVAL;
 }
+
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       switch (attr->attr) {
+       case KVM_ARM_VM_SMCCC_FILTER:
+               return 0;
+       default:
+               return -ENXIO;
+       }
+}
+
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+       void __user *uaddr = (void __user *)attr->addr;
+
+       switch (attr->attr) {
+       case KVM_ARM_VM_SMCCC_FILTER:
+               return kvm_smccc_set_filter(kvm, uaddr);
+       default:
+               return -ENXIO;
+       }
+}
index 24908400e190616f317b9e6725b4605c12d1c8a4..8402e5a1354e43808088c738cdb5d792af17edf8 100644 (file)
@@ -874,13 +874,13 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
        struct arm_pmu *arm_pmu;
        int ret = -ENXIO;
 
-       mutex_lock(&kvm->lock);
+       lockdep_assert_held(&kvm->arch.config_lock);
        mutex_lock(&arm_pmus_lock);
 
        list_for_each_entry(entry, &arm_pmus, entry) {
                arm_pmu = entry->arm_pmu;
                if (arm_pmu->pmu.type == pmu_id) {
-                       if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
+                       if (kvm_vm_has_ran_once(kvm) ||
                            (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
                                ret = -EBUSY;
                                break;
@@ -894,7 +894,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
        }
 
        mutex_unlock(&arm_pmus_lock);
-       mutex_unlock(&kvm->lock);
        return ret;
 }
 
@@ -902,22 +901,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
        struct kvm *kvm = vcpu->kvm;
 
+       lockdep_assert_held(&kvm->arch.config_lock);
+
        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;
 
        if (vcpu->arch.pmu.created)
                return -EBUSY;
 
-       mutex_lock(&kvm->lock);
        if (!kvm->arch.arm_pmu) {
                /* No PMU set, get the default one */
                kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-               if (!kvm->arch.arm_pmu) {
-                       mutex_unlock(&kvm->lock);
+               if (!kvm->arch.arm_pmu)
                        return -ENODEV;
-               }
        }
-       mutex_unlock(&kvm->lock);
 
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
@@ -961,19 +958,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;
 
-               mutex_lock(&kvm->lock);
-
-               if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
-                       mutex_unlock(&kvm->lock);
+               if (kvm_vm_has_ran_once(kvm))
                        return -EBUSY;
-               }
 
                if (!kvm->arch.pmu_filter) {
                        kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
-                       if (!kvm->arch.pmu_filter) {
-                               mutex_unlock(&kvm->lock);
+                       if (!kvm->arch.pmu_filter)
                                return -ENOMEM;
-                       }
 
                        /*
                         * The default depends on the first applied filter.
@@ -992,8 +983,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                else
                        bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
 
-               mutex_unlock(&kvm->lock);
-
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
index 7fbc4c1b9df049bd3a7535f22ff7bad5ae2e65ed..1f69b667332b2ba9f9560dd6cfec0d8ce580104e 100644 (file)
@@ -62,6 +62,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
        struct vcpu_reset_state *reset_state;
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL;
+       int ret = PSCI_RET_SUCCESS;
        unsigned long cpu_id;
 
        cpu_id = smccc_get_arg1(source_vcpu);
@@ -76,11 +77,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
         */
        if (!vcpu)
                return PSCI_RET_INVALID_PARAMS;
+
+       spin_lock(&vcpu->arch.mp_state_lock);
        if (!kvm_arm_vcpu_stopped(vcpu)) {
                if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
-                       return PSCI_RET_ALREADY_ON;
+                       ret = PSCI_RET_ALREADY_ON;
                else
-                       return PSCI_RET_INVALID_PARAMS;
+                       ret = PSCI_RET_INVALID_PARAMS;
+
+               goto out_unlock;
        }
 
        reset_state = &vcpu->arch.reset_state;
@@ -96,7 +101,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
         */
        reset_state->r0 = smccc_get_arg3(source_vcpu);
 
-       WRITE_ONCE(reset_state->reset, true);
+       reset_state->reset = true;
        kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 
        /*
@@ -105,10 +110,12 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
         */
        smp_wmb();
 
-       vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+       WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
        kvm_vcpu_wake_up(vcpu);
 
-       return PSCI_RET_SUCCESS;
+out_unlock:
+       spin_unlock(&vcpu->arch.mp_state_lock);
+       return ret;
 }
 
 static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
@@ -168,8 +175,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
         * after this call is handled and before the VCPUs have been
         * re-initialized.
         */
-       kvm_for_each_vcpu(i, tmp, vcpu->kvm)
-               tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+               spin_lock(&tmp->arch.mp_state_lock);
+               WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+               spin_unlock(&tmp->arch.mp_state_lock);
+       }
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
 
        memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
@@ -229,7 +239,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;
        int ret = 1;
@@ -254,9 +263,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                kvm_psci_narrow_to_32bit(vcpu);
                fallthrough;
        case PSCI_0_2_FN64_CPU_ON:
-               mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
-               mutex_unlock(&kvm->lock);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
                kvm_psci_narrow_to_32bit(vcpu);
@@ -395,7 +402,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;
 
@@ -405,9 +411,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
                val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
-               mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
-               mutex_unlock(&kvm->lock);
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
@@ -435,6 +439,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
        u32 psci_fn = smccc_get_function(vcpu);
+       int version = kvm_psci_version(vcpu);
        unsigned long val;
 
        val = kvm_psci_check_allowed_function(vcpu, psci_fn);
@@ -443,7 +448,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       switch (kvm_psci_version(vcpu)) {
+       switch (version) {
        case KVM_ARM_PSCI_1_1:
                return kvm_psci_1_x_call(vcpu, 1);
        case KVM_ARM_PSCI_1_0:
@@ -453,6 +458,8 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
        case KVM_ARM_PSCI_0_1:
                return kvm_psci_0_1_call(vcpu);
        default:
-               return -EINVAL;
+               WARN_ONCE(1, "Unknown PSCI version %d", version);
+               smccc_set_retval(vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0);
+               return 1;
        }
 }
index 49a3257dec46d90d640456235f8f75dd24912482..b5dee8e57e77a4e8cf27262b65242be3f3c63b0c 100644 (file)
@@ -205,7 +205,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 
        is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
 
-       lockdep_assert_held(&kvm->lock);
+       lockdep_assert_held(&kvm->arch.config_lock);
 
        if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
                /*
@@ -262,17 +262,18 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        bool loaded;
        u32 pstate;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.config_lock);
        ret = kvm_set_vm_width(vcpu);
-       if (!ret) {
-               reset_state = vcpu->arch.reset_state;
-               WRITE_ONCE(vcpu->arch.reset_state.reset, false);
-       }
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
 
        if (ret)
                return ret;
 
+       spin_lock(&vcpu->arch.mp_state_lock);
+       reset_state = vcpu->arch.reset_state;
+       vcpu->arch.reset_state.reset = false;
+       spin_unlock(&vcpu->arch.mp_state_lock);
+
        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);
 
index 53749d3a0996d73646290c4ceeb96cc21511446d..feca77083a5c40fedf8535fdac56f6499ff168d4 100644 (file)
@@ -1139,6 +1139,12 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
                tmr = TIMER_PTIMER;
                treg = TIMER_REG_CVAL;
                break;
+       case SYS_CNTPCT_EL0:
+       case SYS_CNTPCTSS_EL0:
+       case SYS_AARCH32_CNTPCT:
+               tmr = TIMER_PTIMER;
+               treg = TIMER_REG_CNT;
+               break;
        default:
                print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
                kvm_inject_undefined(vcpu);
@@ -2075,6 +2081,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        AMU_AMEVTYPER1_EL0(14),
        AMU_AMEVTYPER1_EL0(15),
 
+       { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
+       { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
@@ -2525,10 +2533,12 @@ static const struct sys_reg_desc cp15_64_regs[] = {
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
        { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
        { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
+       { SYS_DESC(SYS_AARCH32_CNTPCT),       access_arch_timer },
        { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
        { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
        { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
        { SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
+       { SYS_DESC(SYS_AARCH32_CNTPCTSS),     access_arch_timer },
 };
 
 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
index f3e46a9761256d2a6ba5713a606686479d2164da..6ce5c025218dfee7a45e9b789a8ff3b3e89f7785 100644 (file)
@@ -206,6 +206,7 @@ TRACE_EVENT(kvm_get_timer_map,
                __field(        unsigned long,          vcpu_id )
                __field(        int,                    direct_vtimer   )
                __field(        int,                    direct_ptimer   )
+               __field(        int,                    emul_vtimer     )
                __field(        int,                    emul_ptimer     )
        ),
 
@@ -214,14 +215,17 @@ TRACE_EVENT(kvm_get_timer_map,
                __entry->direct_vtimer          = arch_timer_ctx_index(map->direct_vtimer);
                __entry->direct_ptimer =
                        (map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
+               __entry->emul_vtimer =
+                       (map->emul_vtimer) ? arch_timer_ctx_index(map->emul_vtimer) : -1;
                __entry->emul_ptimer =
                        (map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
        ),
 
-       TP_printk("VCPU: %ld, dv: %d, dp: %d, ep: %d",
+       TP_printk("VCPU: %ld, dv: %d, dp: %d, ev: %d, ep: %d",
                  __entry->vcpu_id,
                  __entry->direct_vtimer,
                  __entry->direct_ptimer,
+                 __entry->emul_vtimer,
                  __entry->emul_ptimer)
 );
 
index 78cde687383ca8cd1c6396701deb0dfc552bddf5..07aa0437125a60b318f626abb556e35f635c11bc 100644 (file)
@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
        struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        iter = kvm->arch.vgic.iter;
        if (iter) {
                iter = ERR_PTR(-EBUSY);
@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
        if (end_of_vgic(iter))
                iter = NULL;
 out:
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
        return iter;
 }
 
@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
        if (IS_ERR(v))
                return;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        iter = kvm->arch.vgic.iter;
        kfree(iter->lpi_array);
        kfree(iter);
        kvm->arch.vgic.iter = NULL;
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
 }
 
 static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
index cd134db41a57cc980fa2a0b39aa9cc874870f33d..9d42c7cb2b588235abf9b314cf92bc52f61429b0 100644 (file)
@@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
        unsigned long i;
        int ret;
 
-       if (irqchip_in_kernel(kvm))
-               return -EEXIST;
-
        /*
         * This function is also called by the KVM_CREATE_IRQCHIP handler,
         * which had no chance yet to check the availability of the GICv2
@@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                !kvm_vgic_global_state.can_emulate_gicv2)
                return -ENODEV;
 
+       /* Must be held to avoid race with vCPU creation */
+       lockdep_assert_held(&kvm->lock);
+
        ret = -EBUSY;
        if (!lock_all_vcpus(kvm))
                return ret;
 
+       mutex_lock(&kvm->arch.config_lock);
+
+       if (irqchip_in_kernel(kvm)) {
+               ret = -EEXIST;
+               goto out_unlock;
+       }
+
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu_has_run_once(vcpu))
                        goto out_unlock;
@@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
                INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
 out_unlock:
+       mutex_unlock(&kvm->arch.config_lock);
        unlock_all_vcpus(kvm);
        return ret;
 }
@@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
         * KVM io device for the redistributor that belongs to this VCPU.
         */
        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.config_lock);
                ret = vgic_register_redist_iodev(vcpu);
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.config_lock);
        }
        return ret;
 }
@@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
  * The function is generally called when nr_spis has been explicitly set
  * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
  * vgic_initialized() returns true when this function has succeeded.
- * Must be called with kvm->lock held!
  */
 int vgic_init(struct kvm *kvm)
 {
@@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
        int ret = 0, i;
        unsigned long idx;
 
+       lockdep_assert_held(&kvm->arch.config_lock);
+
        if (vgic_initialized(kvm))
                return 0;
 
@@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
        vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
 }
 
-/* To be called with kvm->lock held */
 static void __kvm_vgic_destroy(struct kvm *kvm)
 {
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
+       lockdep_assert_held(&kvm->arch.config_lock);
+
        vgic_debug_destroy(kvm);
 
        kvm_for_each_vcpu(i, vcpu, kvm)
@@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
 void kvm_vgic_destroy(struct kvm *kvm)
 {
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        __kvm_vgic_destroy(kvm);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
 }
 
 /**
@@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
                if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
                        return -EBUSY;
 
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.config_lock);
                ret = vgic_init(kvm);
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.config_lock);
        }
 
        return ret;
@@ -441,7 +451,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
        if (likely(vgic_ready(kvm)))
                return 0;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        if (vgic_ready(kvm))
                goto out;
 
@@ -459,7 +469,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
                dist->ready = true;
 
 out:
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
        return ret;
 }
 
index 2642e9ce28199c871190fa15888c1b1a9f93a2ba..750e51e3779a31a0eda2f8223be233fb10355664 100644 (file)
@@ -1958,6 +1958,16 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
        mutex_init(&its->its_lock);
        mutex_init(&its->cmd_lock);
 
+       /* Yep, even more trickery for lock ordering... */
+#ifdef CONFIG_LOCKDEP
+       mutex_lock(&dev->kvm->arch.config_lock);
+       mutex_lock(&its->cmd_lock);
+       mutex_lock(&its->its_lock);
+       mutex_unlock(&its->its_lock);
+       mutex_unlock(&its->cmd_lock);
+       mutex_unlock(&dev->kvm->arch.config_lock);
+#endif
+
        its->vgic_its_base = VGIC_ADDR_UNDEF;
 
        INIT_LIST_HEAD(&its->device_list);
@@ -2045,6 +2055,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 
        mutex_lock(&dev->kvm->lock);
 
+       if (!lock_all_vcpus(dev->kvm)) {
+               mutex_unlock(&dev->kvm->lock);
+               return -EBUSY;
+       }
+
+       mutex_lock(&dev->kvm->arch.config_lock);
+
        if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
                ret = -ENXIO;
                goto out;
@@ -2058,11 +2075,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
                goto out;
        }
 
-       if (!lock_all_vcpus(dev->kvm)) {
-               ret = -EBUSY;
-               goto out;
-       }
-
        addr = its->vgic_its_base + offset;
 
        len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@@ -2076,8 +2088,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
        } else {
                *reg = region->its_read(dev->kvm, its, addr, len);
        }
-       unlock_all_vcpus(dev->kvm);
 out:
+       mutex_unlock(&dev->kvm->arch.config_lock);
+       unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
        return ret;
 }
@@ -2749,14 +2762,15 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
                return 0;
 
        mutex_lock(&kvm->lock);
-       mutex_lock(&its->its_lock);
 
        if (!lock_all_vcpus(kvm)) {
-               mutex_unlock(&its->its_lock);
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }
 
+       mutex_lock(&kvm->arch.config_lock);
+       mutex_lock(&its->its_lock);
+
        switch (attr) {
        case KVM_DEV_ARM_ITS_CTRL_RESET:
                vgic_its_reset(kvm, its);
@@ -2769,8 +2783,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
                break;
        }
 
-       unlock_all_vcpus(kvm);
        mutex_unlock(&its->its_lock);
+       mutex_unlock(&kvm->arch.config_lock);
+       unlock_all_vcpus(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
 }
index edeac2380591f4d22614bba5e6752cd81ccfeb67..35cfa268fd5de7a0e3d1e1fa7eaa864715f77103 100644 (file)
@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
        struct vgic_dist *vgic = &kvm->arch.vgic;
        int r;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
                r = -ENODEV;
        }
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
 
        return r;
 }
@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
                if (get_user(addr, uaddr))
                        return -EFAULT;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        switch (attr->attr) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
        }
 
 out:
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
 
        if (!r && !write)
                r =  put_user(addr, uaddr);
@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                    (val & 31))
                        return -EINVAL;
 
-               mutex_lock(&dev->kvm->lock);
+               mutex_lock(&dev->kvm->arch.config_lock);
 
                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;
 
-               mutex_unlock(&dev->kvm->lock);
+               mutex_unlock(&dev->kvm->arch.config_lock);
 
                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
-                       mutex_lock(&dev->kvm->lock);
+                       mutex_lock(&dev->kvm->arch.config_lock);
                        r = vgic_init(dev->kvm);
-                       mutex_unlock(&dev->kvm->lock);
+                       mutex_unlock(&dev->kvm->arch.config_lock);
                        return r;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        /*
@@ -260,7 +260,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }
+
+                       mutex_lock(&dev->kvm->arch.config_lock);
                        r = vgic_v3_save_pending_tables(dev->kvm);
+                       mutex_unlock(&dev->kvm->arch.config_lock);
                        unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
@@ -342,44 +345,6 @@ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
        return 0;
 }
 
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-       struct kvm_vcpu *tmp_vcpu;
-
-       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-               tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-               mutex_unlock(&tmp_vcpu->mutex);
-       }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
-       unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
-       struct kvm_vcpu *tmp_vcpu;
-       unsigned long c;
-
-       /*
-        * Any time a vcpu is run, vcpu_load is called which tries to grab the
-        * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
-        * that no other VCPUs are run and fiddle with the vgic state while we
-        * access it.
-        */
-       kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-               if (!mutex_trylock(&tmp_vcpu->mutex)) {
-                       unlock_vcpus(kvm, c - 1);
-                       return false;
-               }
-       }
-
-       return true;
-}
-
 /**
  * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
  *
@@ -411,15 +376,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
        mutex_lock(&dev->kvm->lock);
 
+       if (!lock_all_vcpus(dev->kvm)) {
+               mutex_unlock(&dev->kvm->lock);
+               return -EBUSY;
+       }
+
+       mutex_lock(&dev->kvm->arch.config_lock);
+
        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;
 
-       if (!lock_all_vcpus(dev->kvm)) {
-               ret = -EBUSY;
-               goto out;
-       }
-
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@@ -432,8 +399,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                break;
        }
 
-       unlock_all_vcpus(dev->kvm);
 out:
+       mutex_unlock(&dev->kvm->arch.config_lock);
+       unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
 
        if (!ret && !is_write)
@@ -569,12 +537,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
        mutex_lock(&dev->kvm->lock);
 
-       if (unlikely(!vgic_initialized(dev->kvm))) {
-               ret = -EBUSY;
-               goto out;
+       if (!lock_all_vcpus(dev->kvm)) {
+               mutex_unlock(&dev->kvm->lock);
+               return -EBUSY;
        }
 
-       if (!lock_all_vcpus(dev->kvm)) {
+       mutex_lock(&dev->kvm->arch.config_lock);
+
+       if (unlikely(!vgic_initialized(dev->kvm))) {
                ret = -EBUSY;
                goto out;
        }
@@ -609,8 +579,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                break;
        }
 
-       unlock_all_vcpus(dev->kvm);
 out:
+       mutex_unlock(&dev->kvm->arch.config_lock);
+       unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
 
        if (!ret && uaccess && !is_write) {
index 91201f7430339e37c4776e20e0973949802f3683..472b18ac92a242bd922cc1a3f58a56326de2c40b 100644 (file)
@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
        case GICD_CTLR: {
                bool was_enabled, is_hwsgi;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.config_lock);
 
                was_enabled = dist->enabled;
                is_hwsgi = dist->nassgireq;
@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
                else if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
 
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.config_lock);
                break;
        }
        case GICD_TYPER:
index e67b3b2c80440273336c4c9ef7cd1671e183d6c1..1939c94e0b248801800ea00c62f081f2b0c9f62d 100644 (file)
@@ -530,13 +530,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 val;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.config_lock);
        vgic_access_active_prepare(vcpu, intid);
 
        val = __vgic_mmio_read_active(vcpu, addr, len);
 
        vgic_access_active_finish(vcpu, intid);
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
 
        return val;
 }
@@ -625,13 +625,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.config_lock);
        vgic_access_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
 
        vgic_access_active_finish(vcpu, intid);
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
 }
 
 int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@@ -662,13 +662,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.config_lock);
        vgic_access_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
 
        vgic_access_active_finish(vcpu, intid);
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
 }
 
 int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
index a413718be92b8703c18ef8dacfe3dcfb1e3027a0..3bb0034780605772859f329620765d742e3fdf59 100644 (file)
@@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
  * @kvm:       Pointer to the VM being initialized
  *
  * We may be called each time a vITS is created, or when the
- * vgic is initialized. This relies on kvm->lock to be
- * held. In both cases, the number of vcpus should now be
- * fixed.
+ * vgic is initialized. In both cases, the number of vcpus
+ * should now be fixed.
  */
 int vgic_v4_init(struct kvm *kvm)
 {
@@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
        int nr_vcpus, ret;
        unsigned long i;
 
+       lockdep_assert_held(&kvm->arch.config_lock);
+
        if (!kvm_vgic_global_state.has_gicv4)
                return 0; /* Nothing to see here... move along. */
 
@@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
 /**
  * vgic_v4_teardown - Free the GICv4 data structures
  * @kvm:       Pointer to the VM being destroyed
- *
- * Relies on kvm->lock to be held.
  */
 void vgic_v4_teardown(struct kvm *kvm)
 {
        struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
        int i;
 
+       lockdep_assert_held(&kvm->arch.config_lock);
+
        if (!its_vm->vpes)
                return;
 
index d97e6080b42172a2db278e41bf4faa26072130db..8be4c1ebdec27a3e13c12914af80227c8d0eb32b 100644 (file)
@@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 /*
  * Locking order is always:
  * kvm->lock (mutex)
- *   its->cmd_lock (mutex)
- *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock                must be taken with IRQs disabled
- *         kvm->lpi_list_lock          must be taken with IRQs disabled
- *           vgic_irq->irq_lock                must be taken with IRQs disabled
+ *   vcpu->mutex (mutex)
+ *     kvm->arch.config_lock (mutex)
+ *       its->cmd_lock (mutex)
+ *         its->its_lock (mutex)
+ *           vgic_cpu->ap_list_lock            must be taken with IRQs disabled
+ *             kvm->lpi_list_lock              must be taken with IRQs disabled
+ *               vgic_irq->irq_lock            must be taken with IRQs disabled
  *
  * As the ap_list_lock might be taken from the timer interrupt handler,
  * we have to disable IRQs before taking this lock and everything lower
@@ -573,6 +575,21 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
        return 0;
 }
 
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
+{
+       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+       unsigned long flags;
+       int ret = -1;
+
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
+       if (irq->hw)
+               ret = irq->hwintid;
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+       vgic_put_irq(vcpu->kvm, irq);
+       return ret;
+}
+
 /**
  * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
  *
index 7f7f3c5ed85a0f157613a6fa9fd86c44e7dd05d3..f9923beedd2769e8cb63be01079093bc8952d7ac 100644 (file)
@@ -273,9 +273,6 @@ int vgic_init(struct kvm *kvm);
 void vgic_debug_init(struct kvm *kvm);
 void vgic_debug_destroy(struct kvm *kvm);
 
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
 static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
index 37b1340e964664111b8d785d78dea3c2892d78a1..40ba95472594db7f053ec19b0d10af5088991e95 100644 (file)
@@ -23,6 +23,7 @@ HAS_DCPOP
 HAS_DIT
 HAS_E0PD
 HAS_ECV
+HAS_ECV_CNTPOFF
 HAS_EPAN
 HAS_GENERIC_AUTH
 HAS_GENERIC_AUTH_ARCH_QARMA3
index dd5a9c7e310f047b82292896a8002ddc3c65b3de..7063f1aacc54ff6a2c4cd36b020e1154533e152a 100644 (file)
@@ -1952,6 +1952,10 @@ Sysreg   CONTEXTIDR_EL2  3       4       13      0       1
 Fields CONTEXTIDR_ELx
 EndSysreg
 
+Sysreg CNTPOFF_EL2     3       4       14      0       6
+Field  63:0    PhysicalOffset
+EndSysreg
+
 Sysreg CPACR_EL12      3       5       1       0       2
 Fields CPACR_ELx
 EndSysreg
index 2803c9c21ef9b6031734e118b6818c5765e07ecf..957121a495f0b744d3881f9d59627a89e269bb6c 100644 (file)
@@ -757,7 +757,7 @@ struct kvm_mips_callbacks {
        int (*vcpu_run)(struct kvm_vcpu *vcpu);
        void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
 };
-extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+extern const struct kvm_mips_callbacks * const kvm_mips_callbacks;
 int kvm_mips_emulation_init(void);
 
 /* Debug: dump vcpu state */
index dafab003ea0df283cb862767770f2d3fac098880..3d21cbfa74435fe7739eeaea457810b129337d43 100644 (file)
@@ -3305,7 +3305,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
 };
 
 /* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */
-struct kvm_mips_callbacks *kvm_mips_callbacks = &kvm_vz_callbacks;
+const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks;
 
 int kvm_mips_emulation_init(void)
 {
index 92a968202ba7ccf9c6d8b2da54fea778186e9cf5..365d2720097cb0724ced2c978de21e3f919f5b69 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_KASAN_H
 #define __ASM_KASAN_H
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)
 #define _GLOBAL_KASAN(fn)      _GLOBAL(__##fn)
 #define _GLOBAL_TOC_KASAN(fn)  _GLOBAL_TOC(__##fn)
 #define EXPORT_SYMBOL_KASAN(fn)        EXPORT_SYMBOL(__##fn)
index 2aa0e31e68844336a0fd37ac81bd2b97204b454f..60ba22770f51c867d50c3bbba32e192292c0b916 100644 (file)
@@ -30,11 +30,17 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
 extern void * memchr(const void *,int,__kernel_size_t);
 void memcpy_flushcache(void *dest, const void *src, size_t size);
 
+#ifdef CONFIG_KASAN
+/* __mem variants are used by KASAN to implement instrumented meminstrinsics. */
+#ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+#define __memset memset
+#define __memcpy memcpy
+#define __memmove memmove
+#else /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
 void *__memset(void *s, int c, __kernel_size_t count);
 void *__memcpy(void *to, const void *from, __kernel_size_t n);
 void *__memmove(void *to, const void *from, __kernel_size_t n);
-
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#ifndef __SANITIZE_ADDRESS__
 /*
  * For files that are not instrumented (e.g. mm/slub.c) we
  * should use not instrumented version of mem* functions.
@@ -46,8 +52,9 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
 #ifndef __NO_FORTIFY
 #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
 #endif
-
-#endif
+#endif /* !__SANITIZE_ADDRESS__ */
+#endif /* CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX */
+#endif /* CONFIG_KASAN */
 
 #ifdef CONFIG_PPC64
 #ifndef CONFIG_KASAN
index 5a319863f2890f4a6aca7ee9b25948fe659f3eea..69623b9045d55678ef43310c4090a1caf14fe840 100644 (file)
 # If you really need to reference something from prom_init.o add
 # it to the list below:
 
-grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
-if [ $? -eq 0 ]
+has_renamed_memintrinsics()
+{
+       grep -q "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} && \
+               ! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" ${KCONFIG_CONFIG}
+}
+
+if has_renamed_memintrinsics
 then
        MEM_FUNCS="__memcpy __memset"
 else
index 2bef19cc1b98c659b38109ba47ea2811d0976b66..af46aa88422bf950cd1868dd4715c6adf2f6c5bb 100644 (file)
@@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
        }
 
        /*
-        * Check for a read fault.  This could be caused by a read on an
-        * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
+        * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
+        * defined in protection_map[].  Read faults can only be caused by
+        * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
         */
-       if (unlikely(!(vma->vm_flags & VM_READ)))
+       if (unlikely(!vma_is_accessible(vma)))
                return true;
+
+       if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
+               return true;
+
        /*
         * We should ideally do the vma pkey access check here. But in the
         * fault path, handle_mm_fault() also does the same check. To avoid
index b481c5c8bae11c8e556b932ac505f8597ecb79ac..21b22bf16ce66b8e374f731d2f377104eb0071fc 100644 (file)
@@ -7,6 +7,7 @@ config PPC_PSERIES
        select OF_DYNAMIC
        select FORCE_PCI
        select PCI_MSI
+       select GENERIC_ALLOCATOR
        select PPC_XICS
        select PPC_XIVE_SPAPR
        select PPC_ICP_NATIVE
index c5e42cc376048dbc9d960a6275b40c983acb7618..5b182d1c196ceddc97e4fd9c131b2eec21ce0b23 100644 (file)
@@ -464,6 +464,28 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE
        depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause)
        depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600
 
+config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+       def_bool y
+       # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
+       depends on AS_IS_GNU && AS_VERSION >= 23800
+       help
+         Newer binutils versions default to ISA spec version 20191213 which
+         moves some instructions from the I extension to the Zicsr and Zifencei
+         extensions.
+
+config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+       def_bool y
+       depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+       # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
+       depends on CC_IS_CLANG && CLANG_VERSION < 170000
+       help
+         Certain versions of clang do not support zicsr and zifencei via -march
+         but newer versions of binutils require it for the reasons noted in the
+         help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
+         option causes an older ISA spec compatible with these older versions
+         of clang to be passed to GAS, which has the same result as passing zicsr
+         and zifencei to -march.
+
 config FPU
        bool "FPU support"
        default y
index 4de83b9b1772d14d766d6b3a14102709b9c96caa..b05e833a022d17094e67dff727cb9ae9aa4c6cfb 100644 (file)
@@ -57,10 +57,12 @@ riscv-march-$(CONFIG_ARCH_RV64I)    := rv64ima
 riscv-march-$(CONFIG_FPU)              := $(riscv-march-y)fd
 riscv-march-$(CONFIG_RISCV_ISA_C)      := $(riscv-march-y)c
 
-# Newer binutils versions default to ISA spec version 20191213 which moves some
-# instructions from the I extension to the Zicsr and Zifencei extensions.
-toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
-riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+KBUILD_CFLAGS += -Wa,-misa-spec=2.2
+KBUILD_AFLAGS += -Wa,-misa-spec=2.2
+else
+riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
+endif
 
 # Check if the toolchain supports Zihintpause extension
 riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause
index 5ff1f19fd45c29b4fc7d2c8b44ac4984caf1e75c..0099dc1161683ddd1e3c45460309e331b7e6b0a7 100644 (file)
@@ -19,8 +19,6 @@ typedef struct {
 #ifdef CONFIG_SMP
        /* A local icache flush is needed before user execution can resume. */
        cpumask_t icache_stale_mask;
-       /* A local tlb flush is needed before user execution can resume. */
-       cpumask_t tlb_stale_mask;
 #endif
 } mm_context_t;
 
index 907b9efd39a87dd3853c1f8f21f4baa2fdd1125c..a09196f8de688ea90123bb74fc21e080cde19f22 100644 (file)
@@ -12,6 +12,8 @@
 #include <asm/errata_list.h>
 
 #ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
 static inline void local_flush_tlb_all(void)
 {
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
@@ -22,24 +24,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
        ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
 }
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-       __asm__ __volatile__ ("sfence.vma x0, %0"
-                       :
-                       : "r" (asid)
-                       : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-               unsigned long asid)
-{
-       __asm__ __volatile__ ("sfence.vma %0, %1"
-                       :
-                       : "r" (addr), "r" (asid)
-                       : "memory");
-}
-
 #else /* CONFIG_MMU */
 #define local_flush_tlb_all()                  do { } while (0)
 #define local_flush_tlb_page(addr)             do { } while (0)
index 80ce9caba8d225979426f01f9a636d11890f9de6..12e22e7330e7bd2f0feee680b143c407473f0a81 100644 (file)
@@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
-static unsigned long asid_mask;
+unsigned long asid_mask;
 
 static atomic_long_t current_version;
 
@@ -196,16 +196,6 @@ switch_mm_fast:
 
        if (need_flush_tlb)
                local_flush_tlb_all();
-#ifdef CONFIG_SMP
-       else {
-               cpumask_t *mask = &mm->context.tlb_stale_mask;
-
-               if (cpumask_test_cpu(cpu, mask)) {
-                       cpumask_clear_cpu(cpu, mask);
-                       local_flush_tlb_all_asid(cntx & asid_mask);
-               }
-       }
-#endif
 }
 
 static void set_mm_noasid(struct mm_struct *mm)
@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
        local_flush_tlb_all();
 }
 
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+                         struct mm_struct *next, unsigned int cpu)
 {
-       if (static_branch_unlikely(&use_asid_allocator))
-               set_mm_asid(mm, cpu);
-       else
-               set_mm_noasid(mm);
+       /*
+        * The mm_cpumask indicates which harts' TLBs contain the virtual
+        * address mapping of the mm. Compared to noasid, using asid
+        * can't guarantee that stale TLB entries are invalidated because
+        * the asid mechanism wouldn't flush TLB for every switch_mm for
+        * performance. So when using asid, keep all CPUs footmarks in
+        * cpumask() until mm reset.
+        */
+       cpumask_set_cpu(cpu, mm_cpumask(next));
+       if (static_branch_unlikely(&use_asid_allocator)) {
+               set_mm_asid(next, cpu);
+       } else {
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
+               set_mm_noasid(next);
+       }
 }
 
 static int __init asids_init(void)
@@ -274,7 +276,8 @@ static int __init asids_init(void)
 }
 early_initcall(asids_init);
 #else
-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
+static inline void set_mm(struct mm_struct *prev,
+                         struct mm_struct *next, unsigned int cpu)
 {
        /* Nothing to do here when there is no MMU */
 }
@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         */
        cpu = smp_processor_id();
 
-       cpumask_clear_cpu(cpu, mm_cpumask(prev));
-       cpumask_set_cpu(cpu, mm_cpumask(next));
-
-       set_mm(next, cpu);
+       set_mm(prev, next, cpu);
 
        flush_icache_deferred(next, cpu);
 }
index 460f785f6e09cd8fb1441f2fff49288f5256181e..d5f3e501dffb3a6dd2596682b26a011389ac2cb1 100644 (file)
@@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
                no_context(regs, addr);
                return;
        }
+       if (pud_leaf(*pud_k))
+               goto flush_tlb;
 
        /*
         * Since the vmalloc area is global, it is unnecessary
@@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
                no_context(regs, addr);
                return;
        }
+       if (pmd_leaf(*pmd_k))
+               goto flush_tlb;
 
        /*
         * Make sure the actual PTE exists as well to
@@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
         * ordering constraint, not a cache flush; it is
         * necessary even after writing invalid entries.
         */
+flush_tlb:
        local_flush_tlb_page(addr);
 }
 
index ce7dfc81bb3fe386748557f44310aa4b1a86f3a9..ef701fa83f3685c6d497ccda59086e7b9f0da20b 100644 (file)
@@ -5,7 +5,23 @@
 #include <linux/sched.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma x0, %0"
+                       :
+                       : "r" (asid)
+                       : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+               unsigned long asid)
+{
+       __asm__ __volatile__ ("sfence.vma %0, %1"
+                       :
+                       : "r" (addr), "r" (asid)
+                       : "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -15,7 +31,6 @@ void flush_tlb_all(void)
 static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
                                  unsigned long size, unsigned long stride)
 {
-       struct cpumask *pmask = &mm->context.tlb_stale_mask;
        struct cpumask *cmask = mm_cpumask(mm);
        unsigned int cpuid;
        bool broadcast;
@@ -27,16 +42,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
        /* check if the tlbflush needs to be sent to other CPUs */
        broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
        if (static_branch_unlikely(&use_asid_allocator)) {
-               unsigned long asid = atomic_long_read(&mm->context.id);
-
-               /*
-                * TLB will be immediately flushed on harts concurrently
-                * executing this MM context. TLB flush on other harts
-                * is deferred until this MM context migrates there.
-                */
-               cpumask_setall(pmask);
-               cpumask_clear_cpu(cpuid, pmask);
-               cpumask_andnot(pmask, pmask, cmask);
+               unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
                if (broadcast) {
                        sbi_remote_sfence_vma_asid(cmask, start, size, asid);
index 9b14045065b6e1e4bfbfcfa6c714a1d5d1e08a6e..74b5cd264862247fc040cabfe24c6a289d3d08a3 100644 (file)
@@ -57,11 +57,19 @@ repeat:
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
            intersects(initrd_data.start, initrd_data.size, safe_addr, size))
                safe_addr = initrd_data.start + initrd_data.size;
+       if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
+               safe_addr = (unsigned long)comps + comps->len;
+               goto repeat;
+       }
        for_each_rb_entry(comp, comps)
                if (intersects(safe_addr, size, comp->addr, comp->len)) {
                        safe_addr = comp->addr + comp->len;
                        goto repeat;
                }
+       if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
+               safe_addr = (unsigned long)certs + certs->len;
+               goto repeat;
+       }
        for_each_rb_entry(cert, certs)
                if (intersects(safe_addr, size, cert->addr, cert->len)) {
                        safe_addr = cert->addr + cert->len;
index 3c68fe49042c2630bf37230b672c5edf26d5338d..4ccf66d29fc24b9cad9425fc0fcb9a01bfe4b3c0 100644 (file)
@@ -23,7 +23,6 @@ CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
@@ -90,7 +89,6 @@ CONFIG_MINIX_SUBPARTITION=y
 CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
 CONFIG_BINFMT_MISC=m
 CONFIG_ZSWAP=y
 CONFIG_ZSMALLOC_STAT=y
@@ -298,7 +296,6 @@ CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
 CONFIG_IP_NF_RAW=m
@@ -340,7 +337,6 @@ CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
 CONFIG_NET_SCH_HFSC=m
 CONFIG_NET_SCH_PRIO=m
@@ -351,7 +347,6 @@ CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
 CONFIG_NET_SCH_TBF=m
 CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
 CONFIG_NET_SCH_NETEM=m
 CONFIG_NET_SCH_DRR=m
 CONFIG_NET_SCH_MQPRIO=m
@@ -363,14 +358,11 @@ CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
 CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
 CONFIG_NET_CLS_FW=m
 CONFIG_NET_CLS_U32=m
 CONFIG_CLS_U32_PERF=y
 CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_CLS_BPF=m
@@ -584,7 +576,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_HID is not set
+# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
@@ -828,6 +820,7 @@ CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_TEST_LOCKUP=m
+CONFIG_DEBUG_PREEMPT=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
@@ -843,6 +836,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300
 # CONFIG_RCU_TRACE is not set
 CONFIG_LATENCYTOP=y
 CONFIG_BOOTTIME_TRACING=y
+CONFIG_FPROBE=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_IRQSOFF_TRACER=y
@@ -857,6 +851,7 @@ CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
 CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
+CONFIG_SAMPLE_FTRACE_OPS=m
 CONFIG_DEBUG_ENTRY=y
 CONFIG_CIO_INJECT=y
 CONFIG_KUNIT=m
index 9ab91632f74cca3c35eefdcd80d4d94adf74704a..693297a2e89733d888c30e78e9d95f981c6db015 100644 (file)
@@ -21,7 +21,6 @@ CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
@@ -85,7 +84,6 @@ CONFIG_MINIX_SUBPARTITION=y
 CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
 CONFIG_BINFMT_MISC=m
 CONFIG_ZSWAP=y
 CONFIG_ZSMALLOC_STAT=y
@@ -289,7 +287,6 @@ CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
 CONFIG_IP_NF_TARGET_TTL=m
 CONFIG_IP_NF_RAW=m
@@ -330,7 +327,6 @@ CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
 CONFIG_NET_SCH_HFSC=m
 CONFIG_NET_SCH_PRIO=m
@@ -341,7 +337,6 @@ CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
 CONFIG_NET_SCH_TBF=m
 CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
 CONFIG_NET_SCH_NETEM=m
 CONFIG_NET_SCH_DRR=m
 CONFIG_NET_SCH_MQPRIO=m
@@ -353,14 +348,11 @@ CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
 CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
 CONFIG_NET_CLS_FW=m
 CONFIG_NET_CLS_U32=m
 CONFIG_CLS_U32_PERF=y
 CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_CLS_BPF=m
@@ -573,7 +565,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-# CONFIG_HID is not set
+# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
@@ -795,6 +787,7 @@ CONFIG_RCU_REF_SCALE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
 CONFIG_BOOTTIME_TRACING=y
+CONFIG_FPROBE=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
@@ -805,6 +798,7 @@ CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
 CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
+CONFIG_SAMPLE_FTRACE_OPS=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_DEBUGFS=y
 CONFIG_LKDTM=m
index a9c0c81d1de992c8cfdadb7985e293856318681e..33a232bb68af95b460eb098136fbdd23e3045751 100644 (file)
@@ -58,7 +58,7 @@ CONFIG_ZFCP=y
 # CONFIG_VMCP is not set
 # CONFIG_MONWRITER is not set
 # CONFIG_S390_VMUR is not set
-# CONFIG_HID is not set
+# CONFIG_HID_SUPPORT is not set
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_VHOST_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
index 9250fde1f97d3710439c40e225bf953c4c259cf9..da6dac36e959f2e4720995bf02d14b3c5995b65e 100644 (file)
@@ -305,7 +305,7 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
 
 static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
 {
-       return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
+       return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa);
 }
 
 static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
@@ -3168,7 +3168,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
        hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        gi->timer.function = gisa_vcpu_kicker;
        memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
-       gi->origin->next_alert = (u32)(u64)gi->origin;
+       gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
        VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
 }
 
index b124d586db5535f8038ad2a381d861ce8f9830a1..7dab00f1e83359a98d3c325039d1c4bb30c5c894 100644 (file)
@@ -112,7 +112,7 @@ static int zpci_reset_aipb(u8 nisc)
                return -EINVAL;
 
        aift->sbv = zpci_aif_sbv;
-       aift->gait = (struct zpci_gaite *)zpci_aipb->aipb.gait;
+       aift->gait = phys_to_virt(zpci_aipb->aipb.gait);
 
        return 0;
 }
index b6a0219e470a4a4ba4609c5fb2131a06d9ddc0c7..8d6b765abf29bffd3a9c5ef57f44b10645e0f3a4 100644 (file)
@@ -138,11 +138,15 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 }
 /* Copy to APCB FORMAT1 from APCB FORMAT0 */
 static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
-                       unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
+                       unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
 {
        struct kvm_s390_apcb0 tmp;
+       unsigned long apcb_gpa;
 
-       if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
+       apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
+
+       if (read_guest_real(vcpu, apcb_gpa, &tmp,
+                           sizeof(struct kvm_s390_apcb0)))
                return -EFAULT;
 
        apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
@@ -157,15 +161,19 @@ static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
  * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
  * @vcpu: pointer to the virtual CPU
  * @apcb_s: pointer to start of apcb in the shadow crycb
- * @apcb_o: pointer to start of original apcb in the guest2
+ * @crycb_gpa: guest physical address to start of original guest crycb
  * @apcb_h: pointer to start of apcb in the guest1
  *
  * Returns 0 and -EFAULT on error reading guest apcb
  */
 static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
-                       unsigned long apcb_o, unsigned long *apcb_h)
+                       unsigned long crycb_gpa, unsigned long *apcb_h)
 {
-       if (read_guest_real(vcpu, apcb_o, apcb_s,
+       unsigned long apcb_gpa;
+
+       apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
+
+       if (read_guest_real(vcpu, apcb_gpa, apcb_s,
                            sizeof(struct kvm_s390_apcb0)))
                return -EFAULT;
 
@@ -178,16 +186,20 @@ static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
  * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
  * @vcpu: pointer to the virtual CPU
  * @apcb_s: pointer to start of apcb in the shadow crycb
- * @apcb_o: pointer to start of original guest apcb
+ * @crycb_gpa: guest physical address to start of original guest crycb
  * @apcb_h: pointer to start of apcb in the host
  *
  * Returns 0 and -EFAULT on error reading guest apcb
  */
 static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
-                       unsigned long apcb_o,
+                       unsigned long crycb_gpa,
                        unsigned long *apcb_h)
 {
-       if (read_guest_real(vcpu, apcb_o, apcb_s,
+       unsigned long apcb_gpa;
+
+       apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
+
+       if (read_guest_real(vcpu, apcb_gpa, apcb_s,
                            sizeof(struct kvm_s390_apcb1)))
                return -EFAULT;
 
@@ -200,7 +212,7 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
  * setup_apcb - Create a shadow copy of the apcb.
  * @vcpu: pointer to the virtual CPU
  * @crycb_s: pointer to shadow crycb
- * @crycb_o: pointer to original guest crycb
+ * @crycb_gpa: guest physical address of original guest crycb
  * @crycb_h: pointer to the host crycb
  * @fmt_o: format of the original guest crycb.
  * @fmt_h: format of the host crycb.
@@ -211,50 +223,46 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
  * Return 0 or an error number if the guest and host crycb are incompatible.
  */
 static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
-              const u32 crycb_o,
+              const u32 crycb_gpa,
               struct kvm_s390_crypto_cb *crycb_h,
               int fmt_o, int fmt_h)
 {
-       struct kvm_s390_crypto_cb *crycb;
-
-       crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
-
        switch (fmt_o) {
        case CRYCB_FORMAT2:
-               if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
+               if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
                        return -EACCES;
                if (fmt_h != CRYCB_FORMAT2)
                        return -EINVAL;
                return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
-                                   (unsigned long) &crycb->apcb1,
+                                   crycb_gpa,
                                    (unsigned long *)&crycb_h->apcb1);
        case CRYCB_FORMAT1:
                switch (fmt_h) {
                case CRYCB_FORMAT2:
                        return setup_apcb10(vcpu, &crycb_s->apcb1,
-                                           (unsigned long) &crycb->apcb0,
+                                           crycb_gpa,
                                            &crycb_h->apcb1);
                case CRYCB_FORMAT1:
                        return setup_apcb00(vcpu,
                                            (unsigned long *) &crycb_s->apcb0,
-                                           (unsigned long) &crycb->apcb0,
+                                           crycb_gpa,
                                            (unsigned long *) &crycb_h->apcb0);
                }
                break;
        case CRYCB_FORMAT0:
-               if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
+               if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
                        return -EACCES;
 
                switch (fmt_h) {
                case CRYCB_FORMAT2:
                        return setup_apcb10(vcpu, &crycb_s->apcb1,
-                                           (unsigned long) &crycb->apcb0,
+                                           crycb_gpa,
                                            &crycb_h->apcb1);
                case CRYCB_FORMAT1:
                case CRYCB_FORMAT0:
                        return setup_apcb00(vcpu,
                                            (unsigned long *) &crycb_s->apcb0,
-                                           (unsigned long) &crycb->apcb0,
+                                           crycb_gpa,
                                            (unsigned long *) &crycb_h->apcb0);
                }
        }
index ef38b1514c77aedb56b66f004b5c2a4fcfc23e4f..e16afacc8fd1b97c6eca33d456354c481a307704 100644 (file)
@@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
        return r;
 }
 
-int zpci_setup_bus_resources(struct zpci_dev *zdev,
-                            struct list_head *resources)
+int zpci_setup_bus_resources(struct zpci_dev *zdev)
 {
        unsigned long addr, size, flags;
        struct resource *res;
@@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
                        return -ENOMEM;
                }
                zdev->bars[i].res = res;
-               pci_add_resource(resources, res);
        }
        zdev->has_resources = 1;
 
@@ -590,17 +588,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
 
 static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 {
+       struct resource *res;
        int i;
 
+       pci_lock_rescan_remove();
        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
-               if (!zdev->bars[i].size || !zdev->bars[i].res)
+               res = zdev->bars[i].res;
+               if (!res)
                        continue;
 
+               release_resource(res);
+               pci_bus_remove_resource(zdev->zbus->bus, res);
                zpci_free_iomap(zdev, zdev->bars[i].map_idx);
-               release_resource(zdev->bars[i].res);
-               kfree(zdev->bars[i].res);
+               zdev->bars[i].res = NULL;
+               kfree(res);
        }
        zdev->has_resources = 0;
+       pci_unlock_rescan_remove();
 }
 
 int pcibios_device_add(struct pci_dev *pdev)
index 6a8da1b742ae5aa76d08d6abb34e94735bc1f08e..a99926af2b69a352cdd7480ed1f2888f1728a7f9 100644 (file)
@@ -41,9 +41,7 @@ static int zpci_nb_devices;
  */
 static int zpci_bus_prepare_device(struct zpci_dev *zdev)
 {
-       struct resource_entry *window, *n;
-       struct resource *res;
-       int rc;
+       int rc, i;
 
        if (!zdev_enabled(zdev)) {
                rc = zpci_enable_device(zdev);
@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
        }
 
        if (!zdev->has_resources) {
-               zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
-               resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
-                       res = window->res;
-                       pci_bus_add_resource(zdev->zbus->bus, res, 0);
+               zpci_setup_bus_resources(zdev);
+               for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+                       if (zdev->bars[i].res)
+                               pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
                }
        }
 
index e96c9860e0644b4d1dd144d5c0fccbdf484a60cb..af9f0ac79a1b1b8a249e7a22703f3dec45dcdad8 100644 (file)
@@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev)
 
 int zpci_alloc_domain(int domain);
 void zpci_free_domain(int domain);
-int zpci_setup_bus_resources(struct zpci_dev *zdev,
-                            struct list_head *resources);
+int zpci_setup_bus_resources(struct zpci_dev *zdev);
 
 static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
                                             unsigned int devfn)
index 8c45b198b62f555d1565c0d68b38304afe767f7c..bccea57dee81ecd12caac3ecc7a2cbe654d9fe90 100644 (file)
@@ -923,6 +923,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 
                /* Event overflow */
                handled++;
+               status &= ~mask;
                perf_sample_data_init(&data, 0, hwc->last_period);
 
                if (!x86_perf_event_set_period(event))
@@ -933,8 +934,6 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 
                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
-
-               status &= ~mask;
        }
 
        /*
index 73c9672c123b980c8507c6fca83628a9ca9f82bc..42abcd3db7a7eb746f96aad0a5e7c3f8c1645679 100644 (file)
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC               (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_AMD_PSFD            (13*32+28) /* "" Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO             (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS                        (13*32+31) /* Branch Sampling available */
 
index 8dc345cc63188b4f338317dc18752b1bfd0954e4..430ca22170e06c7575784b90770c7a3d23b111d5 100644 (file)
@@ -54,8 +54,8 @@ KVM_X86_OP(set_rflags)
 KVM_X86_OP(get_if_flag)
 KVM_X86_OP(flush_tlb_all)
 KVM_X86_OP(flush_tlb_current)
-KVM_X86_OP_OPTIONAL(tlb_remote_flush)
-KVM_X86_OP_OPTIONAL(tlb_remote_flush_with_range)
+KVM_X86_OP_OPTIONAL(flush_remote_tlbs)
+KVM_X86_OP_OPTIONAL(flush_remote_tlbs_range)
 KVM_X86_OP(flush_tlb_gva)
 KVM_X86_OP(flush_tlb_guest)
 KVM_X86_OP(vcpu_pre_run)
index a440ceae06fb2ebd0b792560a7af3ee146057d08..d197c54dcaa642537c9bd5e5c907e753792ba41c 100644 (file)
@@ -420,6 +420,10 @@ struct kvm_mmu_root_info {
 
 #define KVM_MMU_NUM_PREV_ROOTS 3
 
+#define KVM_MMU_ROOT_CURRENT           BIT(0)
+#define KVM_MMU_ROOT_PREVIOUS(i)       BIT(1+i)
+#define KVM_MMU_ROOTS_ALL              (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
+
 #define KVM_HAVE_MMU_RWLOCK
 
 struct kvm_mmu_page;
@@ -439,9 +443,8 @@ struct kvm_mmu {
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gpa_t gva_or_gpa, u64 access,
                            struct x86_exception *exception);
-       int (*sync_page)(struct kvm_vcpu *vcpu,
-                        struct kvm_mmu_page *sp);
-       void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
+       int (*sync_spte)(struct kvm_vcpu *vcpu,
+                        struct kvm_mmu_page *sp, int i);
        struct kvm_mmu_root_info root;
        union kvm_cpu_role cpu_role;
        union kvm_mmu_page_role root_role;
@@ -479,11 +482,6 @@ struct kvm_mmu {
        u64 pdptrs[4]; /* pae */
 };
 
-struct kvm_tlb_range {
-       u64 start_gfn;
-       u64 pages;
-};
-
 enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
@@ -1585,9 +1583,9 @@ struct kvm_x86_ops {
 
        void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
        void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
-       int  (*tlb_remote_flush)(struct kvm *kvm);
-       int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
-                       struct kvm_tlb_range *range);
+       int  (*flush_remote_tlbs)(struct kvm *kvm);
+       int  (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
+                                       gfn_t nr_pages);
 
        /*
         * Flush any TLB entries associated with the given GVA.
@@ -1791,8 +1789,8 @@ void kvm_arch_free_vm(struct kvm *kvm);
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 {
-       if (kvm_x86_ops.tlb_remote_flush &&
-           !static_call(kvm_x86_tlb_remote_flush)(kvm))
+       if (kvm_x86_ops.flush_remote_tlbs &&
+           !static_call(kvm_x86_flush_remote_tlbs)(kvm))
                return 0;
        else
                return -ENOTSUPP;
@@ -1997,10 +1995,6 @@ static inline int __kvm_irq_line_state(unsigned long *irq_state,
        return !!(*irq_state);
 }
 
-#define KVM_MMU_ROOT_CURRENT           BIT(0)
-#define KVM_MMU_ROOT_PREVIOUS(i)       BIT(1+i)
-#define KVM_MMU_ROOTS_ALL              (~0UL)
-
 int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
 void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
 
@@ -2044,8 +2038,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
-void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                           gva_t gva, hpa_t root_hpa);
+void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                            u64 addr, unsigned long roots);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 
@@ -2207,4 +2201,11 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
         KVM_X86_QUIRK_FIX_HYPERCALL_INSN |     \
         KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)
 
+/*
+ * KVM previously used a u32 field in kvm_run to indicate the hypercall was
+ * initiated from long mode. KVM now sets bit 0 to indicate long mode, but the
+ * remaining 31 lower bits must be 0 to preserve ABI.
+ */
+#define KVM_EXIT_HYPERCALL_MBZ         GENMASK_ULL(31, 1)
+
 #endif /* _ASM_X86_KVM_HOST_H */
index b8357d6ecd47ef6766a0fe9fe5161f6447228c16..b63be696b776a7d764359cb990533871d910197d 100644 (file)
@@ -128,8 +128,9 @@ struct snp_psc_desc {
        struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
 } __packed;
 
-/* Guest message request error code */
+/* Guest message request error codes */
 #define SNP_GUEST_REQ_INVALID_LEN      BIT_ULL(32)
+#define SNP_GUEST_REQ_ERR_BUSY         BIT_ULL(33)
 
 #define GHCB_MSR_TERM_REQ              0x100
 #define GHCB_MSR_TERM_REASON_SET_POS   12
index 6daa9b0c8d11474b73ebd63b703fc60ef8972ba3..a3c29b1496c8353eca2f6dd13db042018fa25e51 100644 (file)
  * Sub-leaf 2: EAX: host tsc frequency in kHz
  */
 
+#define XEN_CPUID_TSC_EMULATED               (1u << 0)
+#define XEN_CPUID_HOST_TSC_RELIABLE          (1u << 1)
+#define XEN_CPUID_RDTSCP_INSTR_AVAIL         (1u << 2)
+
+#define XEN_CPUID_TSC_MODE_DEFAULT           (0)
+#define XEN_CPUID_TSC_MODE_ALWAYS_EMULATE    (1u)
+#define XEN_CPUID_TSC_MODE_NEVER_EMULATE     (2u)
+#define XEN_CPUID_TSC_MODE_PVRDTSCP          (3u)
+
 /*
  * Leaf 5 (0x40000x04)
  * HVM-specific features
  * Sub-leaf 0: EAX: Features
  * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag)
+ * Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag)
  */
 #define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */
 #define XEN_HVM_CPUID_X2APIC_VIRT      (1u << 1) /* Virtualized x2APIC accesses */
 #define XEN_HVM_CPUID_VCPU_ID_PRESENT  (1u << 3) /* vcpu id is present in EBX */
 #define XEN_HVM_CPUID_DOMID_PRESENT    (1u << 4) /* domid is present in ECX */
 /*
- * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
- * used to store high bits for the Destination ID. This expands the Destination
- * ID field from 8 to 15 bits, allowing to target APIC IDs up 32768.
+ * With interrupt format set to 0 (non-remappable) bits 55:49 from the
+ * IO-APIC RTE and bits 11:5 from the MSI address can be used to store
+ * high bits for the Destination ID. This expands the Destination ID
+ * field from 8 to 15 bits, allowing to target APIC IDs up to 32768.
  */
 #define XEN_HVM_CPUID_EXT_DEST_ID      (1u << 5)
-/* Per-vCPU event channel upcalls */
+/*
+ * Per-vCPU event channel upcalls work correctly with physical IRQs
+ * bound to event channels.
+ */
 #define XEN_HVM_CPUID_UPCALL_VECTOR    (1u << 6)
 
 /*
index 7f467fe05d42ea702d45679bc8ce27d18656b124..1a6a1f98794967d260e2898b0dbb62f830d45664 100644 (file)
@@ -559,4 +559,7 @@ struct kvm_pmu_event_filter {
 #define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
 #define   KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
 
+/* x86-specific KVM_EXIT_HYPERCALL flags. */
+#define KVM_EXIT_HYPERCALL_LONG_MODE   BIT(0)
+
 #endif /* _ASM_X86_KVM_H */
index 7832a69d170e723c1088098606d9f7b9796d1a94..2eec60f50057a7204ea9e36e208d2637614de06d 100644 (file)
@@ -2355,6 +2355,7 @@ static void mce_restart(void)
 {
        mce_timer_delete_all();
        on_each_cpu(mce_cpu_restart, NULL, 1);
+       mce_schedule_work();
 }
 
 /* Toggle features for corrected errors */
index eb07d4435391bedc4db1a2d6483bda477c093bea..b44c487727d456e445a4ad53ddaff76d84e449b1 100644 (file)
@@ -368,7 +368,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 {
        struct resctrl_schema *s;
        struct rdtgroup *rdtgrp;
-       struct rdt_domain *dom;
        struct rdt_resource *r;
        char *tok, *resname;
        int ret = 0;
@@ -397,10 +396,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                goto out;
        }
 
-       list_for_each_entry(s, &resctrl_schema_all, list) {
-               list_for_each_entry(dom, &s->res->domains, list)
-                       memset(dom->staged_config, 0, sizeof(dom->staged_config));
-       }
+       rdt_staged_configs_clear();
 
        while ((tok = strsep(&buf, "\n")) != NULL) {
                resname = strim(strsep(&tok, ":"));
@@ -445,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
        }
 
 out:
+       rdt_staged_configs_clear();
        rdtgroup_kn_unlock(of->kn);
        cpus_read_unlock();
        return ret ?: nbytes;
index 8edecc5763d8e31313294145b7e0af8a6e477611..85ceaf9a31ac20099c86647b4958e4ca0f28fa6b 100644 (file)
@@ -555,5 +555,6 @@ void __check_limbo(struct rdt_domain *d, bool force_free);
 void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
 void __init thread_throttle_mode_init(void);
 void __init mbm_config_rftype_init(const char *config);
+void rdt_staged_configs_clear(void);
 
 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
index 884b6e9a7e31c36281afe21b2f507f9b223781b2..6ad33f355861feab9a1f013ff82e026e9638044b 100644 (file)
@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
        va_end(ap);
 }
 
+void rdt_staged_configs_clear(void)
+{
+       struct rdt_resource *r;
+       struct rdt_domain *dom;
+
+       lockdep_assert_held(&rdtgroup_mutex);
+
+       for_each_alloc_capable_rdt_resource(r) {
+               list_for_each_entry(dom, &r->domains, list)
+                       memset(dom->staged_config, 0, sizeof(dom->staged_config));
+       }
+}
+
 /*
  * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
  * we can keep a bitmap of free CLOSIDs in a single integer.
@@ -3107,7 +3120,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
        struct resctrl_schema *s;
        struct rdt_resource *r;
-       int ret;
+       int ret = 0;
+
+       rdt_staged_configs_clear();
 
        list_for_each_entry(s, &resctrl_schema_all, list) {
                r = s->res;
@@ -3119,20 +3134,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                } else {
                        ret = rdtgroup_init_cat(s, rdtgrp->closid);
                        if (ret < 0)
-                               return ret;
+                               goto out;
                }
 
                ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret < 0) {
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
-                       return ret;
+                       goto out;
                }
 
        }
 
        rdtgrp->mode = RDT_MODE_SHAREABLE;
 
-       return 0;
+out:
+       rdt_staged_configs_clear();
+       return ret;
 }
 
 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
index 714166cc25f2f6bff10cffd2dee9d40a2fca9db8..0bab497c94369428ceb247f8de26582b7716ce84 100644 (file)
@@ -1118,21 +1118,20 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
        zerofrom = offsetof(struct xregs_state, extended_state_area);
 
        /*
-        * The ptrace buffer is in non-compacted XSAVE format.  In
-        * non-compacted format disabled features still occupy state space,
-        * but there is no state to copy from in the compacted
-        * init_fpstate. The gap tracking will zero these states.
-        */
-       mask = fpstate->user_xfeatures;
-
-       /*
-        * Dynamic features are not present in init_fpstate. When they are
-        * in an all zeros init state, remove those from 'mask' to zero
-        * those features in the user buffer instead of retrieving them
-        * from init_fpstate.
+        * This 'mask' indicates which states to copy from fpstate.
+        * Those extended states that are not present in fpstate are
+        * either disabled or initialized:
+        *
+        * In non-compacted format, disabled features still occupy
+        * state space but there is no state to copy from in the
+        * compacted init_fpstate. The gap tracking will zero these
+        * states.
+        *
+        * The extended features have an all zeroes init state. Thus,
+        * remove them from 'mask' to zero those features in the user
+        * buffer instead of retrieving them from init_fpstate.
         */
-       if (fpu_state_size_dynamic())
-               mask &= (header.xfeatures | xinit->header.xcomp_bv);
+       mask = header.xfeatures;
 
        for_each_extended_xfeature(i, mask) {
                /*
@@ -1151,9 +1150,8 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
                        pkru.pkru = pkru_val;
                        membuf_write(&to, &pkru, sizeof(pkru));
                } else {
-                       copy_feature(header.xfeatures & BIT_ULL(i), &to,
+                       membuf_write(&to,
                                     __raw_xsave_addr(xsave, i),
-                                    __raw_xsave_addr(xinit, i),
                                     xstate_sizes[i]);
                }
                /*
index 1265ad519249c027cae82d8941d394b8dbc50795..fb4f1e01b64a28af7b85ff26fa8edac39ce67bd6 100644 (file)
@@ -136,10 +136,12 @@ SYM_TYPED_FUNC_START(ftrace_stub)
        RET
 SYM_FUNC_END(ftrace_stub)
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 SYM_TYPED_FUNC_START(ftrace_stub_graph)
        CALL_DEPTH_ACCOUNT
        RET
 SYM_FUNC_END(ftrace_stub_graph)
+#endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
index 679026a640efd8e2e87a03d3b5e57cf18de1a6dd..3f664ab277c4987ac57f41959a300f3a8f5c6439 100644 (file)
@@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
        struct ghcb *ghcb;
        int ret;
 
-       if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
-               return -ENODEV;
-
        if (!fw_err)
                return -EINVAL;
 
@@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
        if (ret)
                goto e_put;
 
-       if (ghcb->save.sw_exit_info_2) {
-               /* Number of expected pages are returned in RBX */
-               if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
-                   ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
-                       input->data_npages = ghcb_get_rbx(ghcb);
+       *fw_err = ghcb->save.sw_exit_info_2;
+       switch (*fw_err) {
+       case 0:
+               break;
 
-               *fw_err = ghcb->save.sw_exit_info_2;
+       case SNP_GUEST_REQ_ERR_BUSY:
+               ret = -EAGAIN;
+               break;
 
+       case SNP_GUEST_REQ_INVALID_LEN:
+               /* Number of expected pages are returned in RBX */
+               if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+                       input->data_npages = ghcb_get_rbx(ghcb);
+                       ret = -ENOSPC;
+                       break;
+               }
+               fallthrough;
+       default:
                ret = -EIO;
+               break;
        }
 
 e_put:
index b736ddb420886f7e5c725ef5d885a4aa52c5e5a8..b944492faefa332bf7d4ad865c3ac12da452eac0 100644 (file)
@@ -60,12 +60,6 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
        return ret;
 }
 
-/*
- * This one is tied to SSB in the user API, and not
- * visible in /proc/cpuinfo.
- */
-#define KVM_X86_FEATURE_AMD_PSFD       (13*32+28) /* Predictive Store Forwarding Disable */
-
 #define F feature_bit
 
 /* Scattered Flag - For features that are scattered by cpufeatures.h. */
@@ -266,7 +260,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                /* Update OSXSAVE bit */
                if (boot_cpu_has(X86_FEATURE_XSAVE))
                        cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
-                                  kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
+                                          kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
 
                cpuid_entry_change(best, X86_FEATURE_APIC,
                           vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
@@ -275,7 +269,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
        best = cpuid_entry2_find(entries, nent, 7, 0);
        if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
                cpuid_entry_change(best, X86_FEATURE_OSPKE,
-                                  kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
+                                  kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
 
        best = cpuid_entry2_find(entries, nent, 0xD, 0);
        if (best)
@@ -715,7 +709,7 @@ void kvm_set_cpu_caps(void)
                F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
-               __feature_bit(KVM_X86_FEATURE_AMD_PSFD)
+               F(AMD_PSFD)
        );
 
        /*
index a20bec931764e73be41319352b153386599e8335..936a397a08cd2840d9aea8e477272e342c3f53a6 100644 (file)
@@ -1640,6 +1640,14 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                        goto exception;
                break;
        case VCPU_SREG_CS:
+               /*
+                * KVM uses "none" when loading CS as part of emulating Real
+                * Mode exceptions and IRET (handled above).  In all other
+                * cases, loading CS without a control transfer is a KVM bug.
+                */
+               if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
+                       goto exception;
+
                if (!(seg_desc.type & 8))
                        goto exception;
 
index 4c91f626c05808305d5f554bbf39bd5ec0c5d1cd..75eae9c4998adfb221a6f105d55d64acf3a8c31a 100644 (file)
@@ -4,7 +4,7 @@
 
 #include <linux/kvm_host.h>
 
-#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
+#define KVM_POSSIBLE_CR0_GUEST_BITS    (X86_CR0_TS | X86_CR0_WP)
 #define KVM_POSSIBLE_CR4_GUEST_BITS                              \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
@@ -157,6 +157,14 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
        return vcpu->arch.cr0 & mask;
 }
 
+static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
+                                              unsigned long cr0_bit)
+{
+       BUILD_BUG_ON(!is_power_of_2(cr0_bit));
+
+       return !!kvm_read_cr0_bits(vcpu, cr0_bit);
+}
+
 static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr0_bits(vcpu, ~0UL);
@@ -171,6 +179,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
        return vcpu->arch.cr4 & mask;
 }
 
+static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
+                                              unsigned long cr4_bit)
+{
+       BUILD_BUG_ON(!is_power_of_2(cr4_bit));
+
+       return !!kvm_read_cr4_bits(vcpu, cr4_bit);
+}
+
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
        if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
index 482d6639ef8894d089770aecfc2a63b77ad499ea..ded0bd688c6597b998e490ec754a50206ef5c874 100644 (file)
 #include "hyperv.h"
 #include "kvm_onhyperv.h"
 
+struct kvm_hv_tlb_range {
+       u64 start_gfn;
+       u64 pages;
+};
+
 static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
                void *data)
 {
-       struct kvm_tlb_range *range = data;
+       struct kvm_hv_tlb_range *range = data;
 
        return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
                        range->pages);
 }
 
 static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
-                                          struct kvm_tlb_range *range)
+                                          struct kvm_hv_tlb_range *range)
 {
        if (range)
                return hyperv_flush_guest_mapping_range(root_tdp,
@@ -29,8 +34,8 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
                return hyperv_flush_guest_mapping(root_tdp);
 }
 
-int hv_remote_flush_tlb_with_range(struct kvm *kvm,
-               struct kvm_tlb_range *range)
+static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
+                                       struct kvm_hv_tlb_range *range)
 {
        struct kvm_arch *kvm_arch = &kvm->arch;
        struct kvm_vcpu *vcpu;
@@ -86,19 +91,29 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
        spin_unlock(&kvm_arch->hv_root_tdp_lock);
        return ret;
 }
-EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);
 
-int hv_remote_flush_tlb(struct kvm *kvm)
+int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
+{
+       struct kvm_hv_tlb_range range = {
+               .start_gfn = start_gfn,
+               .pages = nr_pages,
+       };
+
+       return __hv_flush_remote_tlbs_range(kvm, &range);
+}
+EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
+
+int hv_flush_remote_tlbs(struct kvm *kvm)
 {
-       return hv_remote_flush_tlb_with_range(kvm, NULL);
+       return __hv_flush_remote_tlbs_range(kvm, NULL);
 }
-EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
+EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
 
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 {
        struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
 
-       if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
+       if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
                spin_lock(&kvm_arch->hv_root_tdp_lock);
                vcpu->arch.hv_root_tdp = root_tdp;
                if (root_tdp != kvm_arch->hv_root_tdp)
index 287e98ef9df3d820244133f4aa73547abe613f1d..ff127d313242b04301091f8372d526657ead331f 100644 (file)
@@ -7,9 +7,8 @@
 #define __ARCH_X86_KVM_KVM_ONHYPERV_H__
 
 #if IS_ENABLED(CONFIG_HYPERV)
-int hv_remote_flush_tlb_with_range(struct kvm *kvm,
-               struct kvm_tlb_range *range);
-int hv_remote_flush_tlb(struct kvm *kvm);
+int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
+int hv_flush_remote_tlbs(struct kvm *kvm);
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
 #else /* !CONFIG_HYPERV */
 static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
index 168c46fd8dd1893aa198872fcc019d36976551f9..92d5a1924fc18ec07bd6a95bdf6cfe328fbfed23 100644 (file)
@@ -113,6 +113,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
+void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+                                       struct kvm_mmu *mmu);
 
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
@@ -132,7 +134,7 @@ static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
 {
        BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
 
-       return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
+       return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
               ? cr3 & X86_CR3_PCID_MASK
               : 0;
 }
@@ -153,6 +155,24 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
                                          vcpu->arch.mmu->root_role.level);
 }
 
+static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+                                                   struct kvm_mmu *mmu)
+{
+       /*
+        * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
+        * @mmu's snapshot of CR0.WP and thus all related paging metadata may
+        * be stale.  Refresh CR0.WP and the metadata on-demand when checking
+        * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
+        * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
+        * need to refresh nested_mmu, a.k.a. the walker used to translate L2
+        * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
+        */
+       if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
+               return;
+
+       __kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
+}
+
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
@@ -184,8 +204,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
        bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
        int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
-       bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;
+       bool fault;
+
+       kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
+
+       fault = (mmu->permissions[index] >> pte_access) & 1;
 
        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
        if (unlikely(mmu->pkru_mask)) {
index 8ced48797d598d3d8ca5359d44978180ab6d0bd6..c8961f45e3b1c1c01108f2830da0aad9624601b2 100644 (file)
@@ -125,17 +125,31 @@ module_param(dbg, bool, 0644);
 #define PTE_LIST_EXT 14
 
 /*
- * Slight optimization of cacheline layout, by putting `more' and `spte_count'
- * at the start; then accessing it will only use one single cacheline for
- * either full (entries==PTE_LIST_EXT) case or entries<=6.
+ * struct pte_list_desc is the core data structure used to implement a custom
+ * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a
+ * given GFN when used in the context of rmaps.  Using a custom list allows KVM
+ * to optimize for the common case where many GFNs will have at most a handful
+ * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small
+ * memory footprint, which in turn improves runtime performance by exploiting
+ * cache locality.
+ *
+ * A list is comprised of one or more pte_list_desc objects (descriptors).
+ * Each individual descriptor stores up to PTE_LIST_EXT SPTEs.  If a descriptor
+ * is full and a new SPTE needs to be added, a new descriptor is allocated and
+ * becomes the head of the list.  This means that by definition, all tail
+ * descriptors are full.
+ *
+ * Note, the meta data fields are deliberately placed at the start of the
+ * structure to optimize the cacheline layout; accessing the descriptor will
+ * touch only a single cacheline so long as @spte_count<=6 (or if only the
+ * descriptor's metadata is accessed).
  */
 struct pte_list_desc {
        struct pte_list_desc *more;
-       /*
-        * Stores number of entries stored in the pte_list_desc.  No need to be
-        * u64 but just for easier alignment.  When PTE_LIST_EXT, means full.
-        */
-       u64 spte_count;
+       /* The number of PTEs stored in _this_ descriptor. */
+       u32 spte_count;
+       /* The number of PTEs stored in all tails of this descriptor. */
+       u32 tail_count;
        u64 *sptes[PTE_LIST_EXT];
 };
 
@@ -242,32 +256,35 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
        return regs;
 }
 
-static inline bool kvm_available_flush_tlb_with_range(void)
+static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
 {
-       return kvm_x86_ops.tlb_remote_flush_with_range;
+       return kvm_read_cr3(vcpu);
 }
 
-static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
-               struct kvm_tlb_range *range)
+static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
+                                                 struct kvm_mmu *mmu)
 {
-       int ret = -ENOTSUPP;
-
-       if (range && kvm_x86_ops.tlb_remote_flush_with_range)
-               ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
+       if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
+               return kvm_read_cr3(vcpu);
 
-       if (ret)
-               kvm_flush_remote_tlbs(kvm);
+       return mmu->get_guest_pgd(vcpu);
 }
 
-void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
-               u64 start_gfn, u64 pages)
+static inline bool kvm_available_flush_remote_tlbs_range(void)
 {
-       struct kvm_tlb_range range;
+       return kvm_x86_ops.flush_remote_tlbs_range;
+}
 
-       range.start_gfn = start_gfn;
-       range.pages = pages;
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
+                                gfn_t nr_pages)
+{
+       int ret = -EOPNOTSUPP;
 
-       kvm_flush_remote_tlbs_with_range(kvm, &range);
+       if (kvm_x86_ops.flush_remote_tlbs_range)
+               ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn,
+                                                                  nr_pages);
+       if (ret)
+               kvm_flush_remote_tlbs(kvm);
 }
 
 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
@@ -888,9 +905,9 @@ static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        untrack_possible_nx_huge_page(kvm, sp);
 }
 
-static struct kvm_memory_slot *
-gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
-                           bool no_dirty_log)
+static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
+                                                          gfn_t gfn,
+                                                          bool no_dirty_log)
 {
        struct kvm_memory_slot *slot;
 
@@ -929,53 +946,69 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
                desc->sptes[0] = (u64 *)rmap_head->val;
                desc->sptes[1] = spte;
                desc->spte_count = 2;
+               desc->tail_count = 0;
                rmap_head->val = (unsigned long)desc | 1;
                ++count;
        } else {
                rmap_printk("%p %llx many->many\n", spte, *spte);
                desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-               while (desc->spte_count == PTE_LIST_EXT) {
-                       count += PTE_LIST_EXT;
-                       if (!desc->more) {
-                               desc->more = kvm_mmu_memory_cache_alloc(cache);
-                               desc = desc->more;
-                               desc->spte_count = 0;
-                               break;
-                       }
-                       desc = desc->more;
+               count = desc->tail_count + desc->spte_count;
+
+               /*
+                * If the previous head is full, allocate a new head descriptor
+                * as tail descriptors are always kept full.
+                */
+               if (desc->spte_count == PTE_LIST_EXT) {
+                       desc = kvm_mmu_memory_cache_alloc(cache);
+                       desc->more = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+                       desc->spte_count = 0;
+                       desc->tail_count = count;
+                       rmap_head->val = (unsigned long)desc | 1;
                }
-               count += desc->spte_count;
                desc->sptes[desc->spte_count++] = spte;
        }
        return count;
 }
 
-static void
-pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
-                          struct pte_list_desc *desc, int i,
-                          struct pte_list_desc *prev_desc)
+static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+                                      struct pte_list_desc *desc, int i)
 {
-       int j = desc->spte_count - 1;
+       struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+       int j = head_desc->spte_count - 1;
+
+       /*
+        * The head descriptor should never be empty.  A new head is added only
+        * when adding an entry and the previous head is full, and heads are
+        * removed (this flow) when they become empty.
+        */
+       BUG_ON(j < 0);
 
-       desc->sptes[i] = desc->sptes[j];
-       desc->sptes[j] = NULL;
-       desc->spte_count--;
-       if (desc->spte_count)
+       /*
+        * Replace the to-be-freed SPTE with the last valid entry from the head
+        * descriptor to ensure that tail descriptors are full at all times.
+        * Note, this also means that tail_count is stable for each descriptor.
+        */
+       desc->sptes[i] = head_desc->sptes[j];
+       head_desc->sptes[j] = NULL;
+       head_desc->spte_count--;
+       if (head_desc->spte_count)
                return;
-       if (!prev_desc && !desc->more)
+
+       /*
+        * The head descriptor is empty.  If there are no tail descriptors,
+        * nullify the rmap head to mark the list as empty, else point the rmap
+        * head at the next descriptor, i.e. the new head.
+        */
+       if (!head_desc->more)
                rmap_head->val = 0;
        else
-               if (prev_desc)
-                       prev_desc->more = desc->more;
-               else
-                       rmap_head->val = (unsigned long)desc->more | 1;
-       mmu_free_pte_list_desc(desc);
+               rmap_head->val = (unsigned long)head_desc->more | 1;
+       mmu_free_pte_list_desc(head_desc);
 }
 
 static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
 {
        struct pte_list_desc *desc;
-       struct pte_list_desc *prev_desc;
        int i;
 
        if (!rmap_head->val) {
@@ -991,16 +1024,13 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
        } else {
                rmap_printk("%p many->many\n", spte);
                desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-               prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < desc->spte_count; ++i) {
                                if (desc->sptes[i] == spte) {
-                                       pte_list_desc_remove_entry(rmap_head,
-                                                       desc, i, prev_desc);
+                                       pte_list_desc_remove_entry(rmap_head, desc, i);
                                        return;
                                }
                        }
-                       prev_desc = desc;
                        desc = desc->more;
                }
                pr_err("%s: %p many->many\n", __func__, spte);
@@ -1047,7 +1077,6 @@ out:
 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
 {
        struct pte_list_desc *desc;
-       unsigned int count = 0;
 
        if (!rmap_head->val)
                return 0;
@@ -1055,13 +1084,7 @@ unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
                return 1;
 
        desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
-
-       while (desc) {
-               count += desc->spte_count;
-               desc = desc->more;
-       }
-
-       return count;
+       return desc->tail_count + desc->spte_count;
 }
 
 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
@@ -1073,14 +1096,6 @@ static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
        return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
 }
 
-static bool rmap_can_add(struct kvm_vcpu *vcpu)
-{
-       struct kvm_mmu_memory_cache *mc;
-
-       mc = &vcpu->arch.mmu_pte_list_desc_cache;
-       return kvm_mmu_memory_cache_nr_free_objects(mc);
-}
-
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
        struct kvm_memslots *slots;
@@ -1479,7 +1494,7 @@ restart:
                }
        }
 
-       if (need_flush && kvm_available_flush_tlb_with_range()) {
+       if (need_flush && kvm_available_flush_remote_tlbs_range()) {
                kvm_flush_remote_tlbs_gfn(kvm, gfn, level);
                return false;
        }
@@ -1504,8 +1519,8 @@ struct slot_rmap_walk_iterator {
        struct kvm_rmap_head *end_rmap;
 };
 
-static void
-rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
+static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
+                                int level)
 {
        iterator->level = level;
        iterator->gfn = iterator->start_gfn;
@@ -1513,10 +1528,10 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
        iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
 }
 
-static void
-slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
-                   const struct kvm_memory_slot *slot, int start_level,
-                   int end_level, gfn_t start_gfn, gfn_t end_gfn)
+static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
+                               const struct kvm_memory_slot *slot,
+                               int start_level, int end_level,
+                               gfn_t start_gfn, gfn_t end_gfn)
 {
        iterator->slot = slot;
        iterator->start_level = start_level;
@@ -1789,12 +1804,6 @@ static void mark_unsync(u64 *spte)
        kvm_mmu_mark_parents_unsync(sp);
 }
 
-static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-                              struct kvm_mmu_page *sp)
-{
-       return -1;
-}
-
 #define KVM_PAGE_ARRAY_NR 16
 
 struct kvm_mmu_pages {
@@ -1914,10 +1923,79 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])     \
                if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
+static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
+
+       /*
+        * Ignore various flags when verifying that it's safe to sync a shadow
+        * page using the current MMU context.
+        *
+        *  - level: not part of the overall MMU role and will never match as the MMU's
+        *           level tracks the root level
+        *  - access: updated based on the new guest PTE
+        *  - quadrant: not part of the overall MMU role (similar to level)
+        */
+       const union kvm_mmu_page_role sync_role_ign = {
+               .level = 0xf,
+               .access = 0x7,
+               .quadrant = 0x3,
+               .passthrough = 0x1,
+       };
+
+       /*
+        * Direct pages can never be unsync, and KVM should never attempt to
+        * sync a shadow page for a different MMU context, e.g. if the role
+        * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
+        * reserved bits checks will be wrong, etc...
+        */
+       if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
+                        (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
+               return false;
+
+       return true;
+}
+
+static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
+{
+       if (!sp->spt[i])
+               return 0;
+
+       return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
+}
+
+static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+       int flush = 0;
+       int i;
+
+       if (!kvm_sync_page_check(vcpu, sp))
+               return -1;
+
+       for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
+               int ret = kvm_sync_spte(vcpu, sp, i);
+
+               if (ret < -1)
+                       return -1;
+               flush |= ret;
+       }
+
+       /*
+        * Note, any flush is purely for KVM's correctness, e.g. when dropping
+        * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
+        * unmap or dirty logging event doesn't fail to flush.  The guest is
+        * responsible for flushing the TLB to ensure any changes in protection
+        * bits are recognized, i.e. until the guest flushes or page faults on
+        * a relevant address, KVM is architecturally allowed to let vCPUs use
+        * cached translations with the old protection bits.
+        */
+       return flush;
+}
+
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
 {
-       int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
+       int ret = __kvm_sync_page(vcpu, sp);
 
        if (ret < 0)
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
@@ -3304,9 +3382,9 @@ static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
  * Returns true if the SPTE was fixed successfully. Otherwise,
  * someone else modified the SPTE from its original value.
  */
-static bool
-fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-                       u64 *sptep, u64 old_spte, u64 new_spte)
+static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
+                                   struct kvm_page_fault *fault,
+                                   u64 *sptep, u64 old_spte, u64 new_spte)
 {
        /*
         * Theoretically we could also set dirty bit (and flush TLB) here in
@@ -3513,6 +3591,8 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
        LIST_HEAD(invalid_list);
        bool free_active_root;
 
+       WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL);
+
        BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
 
        /* Before acquiring the MMU lock, see if we need to do any real work. */
@@ -3731,7 +3811,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        int quadrant, i, r;
        hpa_t root;
 
-       root_pgd = mmu->get_guest_pgd(vcpu);
+       root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
        root_gfn = root_pgd >> PAGE_SHIFT;
 
        if (mmu_check_root(vcpu, root_gfn))
@@ -4181,7 +4261,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        arch.token = alloc_apf_token(vcpu);
        arch.gfn = gfn;
        arch.direct_map = vcpu->arch.mmu->root_role.direct;
-       arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
+       arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
 
        return kvm_setup_async_pf(vcpu, cr2_or_gpa,
                                  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
@@ -4200,7 +4280,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
                return;
 
        if (!vcpu->arch.mmu->root_role.direct &&
-             work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
+             work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
                return;
 
        kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL);
@@ -4469,8 +4549,7 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 {
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
-       context->sync_page = nonpaging_sync_page;
-       context->invlpg = NULL;
+       context->sync_spte = NULL;
 }
 
 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -4604,11 +4683,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
-static unsigned long get_cr3(struct kvm_vcpu *vcpu)
-{
-       return kvm_read_cr3(vcpu);
-}
-
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                           unsigned int access)
 {
@@ -4638,10 +4712,9 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 #include "paging_tmpl.h"
 #undef PTTYPE
 
-static void
-__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
-                       u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
-                       bool pse, bool amd)
+static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
+                                   u64 pa_bits_rsvd, int level, bool nx,
+                                   bool gbpages, bool pse, bool amd)
 {
        u64 gbpages_bit_rsvd = 0;
        u64 nonleaf_bit8_rsvd = 0;
@@ -4754,9 +4827,9 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                                guest_cpuid_is_amd_or_hygon(vcpu));
 }
 
-static void
-__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
-                           u64 pa_bits_rsvd, bool execonly, int huge_page_level)
+static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
+                                       u64 pa_bits_rsvd, bool execonly,
+                                       int huge_page_level)
 {
        u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
        u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
@@ -4856,8 +4929,7 @@ static inline bool boot_cpu_is_amd(void)
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
  */
-static void
-reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
+static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
 {
        struct rsvd_bits_validate *shadow_zero_check;
        int i;
@@ -5060,20 +5132,18 @@ static void paging64_init_context(struct kvm_mmu *context)
 {
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
-       context->sync_page = paging64_sync_page;
-       context->invlpg = paging64_invlpg;
+       context->sync_spte = paging64_sync_spte;
 }
 
 static void paging32_init_context(struct kvm_mmu *context)
 {
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
-       context->sync_page = paging32_sync_page;
-       context->invlpg = paging32_invlpg;
+       context->sync_spte = paging32_sync_spte;
 }
 
-static union kvm_cpu_role
-kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
+static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
+                                           const struct kvm_mmu_role_regs *regs)
 {
        union kvm_cpu_role role = {0};
 
@@ -5112,6 +5182,21 @@ kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
        return role;
 }
 
+void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+                                       struct kvm_mmu *mmu)
+{
+       const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
+
+       BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
+       BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
+
+       if (is_cr0_wp(mmu) == cr0_wp)
+               return;
+
+       mmu->cpu_role.base.cr0_wp = cr0_wp;
+       reset_guest_paging_metadata(vcpu, mmu);
+}
+
 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 {
        /* tdp_root_level is architecture forced level, use it if nonzero */
@@ -5157,9 +5242,8 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
        context->cpu_role.as_u64 = cpu_role.as_u64;
        context->root_role.word = root_role.word;
        context->page_fault = kvm_tdp_page_fault;
-       context->sync_page = nonpaging_sync_page;
-       context->invlpg = NULL;
-       context->get_guest_pgd = get_cr3;
+       context->sync_spte = NULL;
+       context->get_guest_pgd = get_guest_cr3;
        context->get_pdptr = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
 
@@ -5289,8 +5373,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
                context->page_fault = ept_page_fault;
                context->gva_to_gpa = ept_gva_to_gpa;
-               context->sync_page = ept_sync_page;
-               context->invlpg = ept_invlpg;
+               context->sync_spte = ept_sync_spte;
 
                update_permission_bitmask(context, true);
                context->pkru_mask = 0;
@@ -5309,7 +5392,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
 
        kvm_init_shadow_mmu(vcpu, cpu_role);
 
-       context->get_guest_pgd     = get_cr3;
+       context->get_guest_pgd     = get_guest_cr3;
        context->get_pdptr         = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
 }
@@ -5323,7 +5406,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
                return;
 
        g_context->cpu_role.as_u64   = new_mode.as_u64;
-       g_context->get_guest_pgd     = get_cr3;
+       g_context->get_guest_pgd     = get_guest_cr3;
        g_context->get_pdptr         = kvm_pdptr_read;
        g_context->inject_page_fault = kvm_inject_page_fault;
 
@@ -5331,7 +5414,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
         * L2 page tables are never shadowed, so there is no need to sync
         * SPTEs.
         */
-       g_context->invlpg            = NULL;
+       g_context->sync_spte         = NULL;
 
        /*
         * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
@@ -5707,48 +5790,77 @@ emulate:
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
-void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-                           gva_t gva, hpa_t root_hpa)
+static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                                     u64 addr, hpa_t root_hpa)
+{
+       struct kvm_shadow_walk_iterator iterator;
+
+       vcpu_clear_mmio_info(vcpu, addr);
+
+       if (!VALID_PAGE(root_hpa))
+               return;
+
+       write_lock(&vcpu->kvm->mmu_lock);
+       for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) {
+               struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
+
+               if (sp->unsync) {
+                       int ret = kvm_sync_spte(vcpu, sp, iterator.index);
+
+                       if (ret < 0)
+                               mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
+                       if (ret)
+                               kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
+               }
+
+               if (!sp->unsync_children)
+                       break;
+       }
+       write_unlock(&vcpu->kvm->mmu_lock);
+}
+
+void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                            u64 addr, unsigned long roots)
 {
        int i;
 
+       WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL);
+
        /* It's actually a GPA for vcpu->arch.guest_mmu.  */
        if (mmu != &vcpu->arch.guest_mmu) {
                /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
-               if (is_noncanonical_address(gva, vcpu))
+               if (is_noncanonical_address(addr, vcpu))
                        return;
 
-               static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
+               static_call(kvm_x86_flush_tlb_gva)(vcpu, addr);
        }
 
-       if (!mmu->invlpg)
+       if (!mmu->sync_spte)
                return;
 
-       if (root_hpa == INVALID_PAGE) {
-               mmu->invlpg(vcpu, gva, mmu->root.hpa);
+       if (roots & KVM_MMU_ROOT_CURRENT)
+               __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
 
-               /*
-                * INVLPG is required to invalidate any global mappings for the VA,
-                * irrespective of PCID. Since it would take us roughly similar amount
-                * of work to determine whether any of the prev_root mappings of the VA
-                * is marked global, or to just sync it blindly, so we might as well
-                * just always sync it.
-                *
-                * Mappings not reachable via the current cr3 or the prev_roots will be
-                * synced when switching to that cr3, so nothing needs to be done here
-                * for them.
-                */
-               for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-                       if (VALID_PAGE(mmu->prev_roots[i].hpa))
-                               mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
-       } else {
-               mmu->invlpg(vcpu, gva, root_hpa);
+       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+               if (roots & KVM_MMU_ROOT_PREVIOUS(i))
+                       __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
        }
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
+       /*
+        * INVLPG is required to invalidate any global mappings for the VA,
+        * irrespective of PCID.  Blindly sync all roots as it would take
+        * roughly the same amount of work/time to determine whether any of the
+        * previous roots have a global mapping.
+        *
+        * Mappings not reachable via the current or previous cached roots will
+        * be synced when switching to that new cr3, so nothing needs to be
+        * done here for them.
+        */
+       kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
        ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5757,27 +5869,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       bool tlb_flush = false;
+       unsigned long roots = 0;
        uint i;
 
-       if (pcid == kvm_get_active_pcid(vcpu)) {
-               if (mmu->invlpg)
-                       mmu->invlpg(vcpu, gva, mmu->root.hpa);
-               tlb_flush = true;
-       }
+       if (pcid == kvm_get_active_pcid(vcpu))
+               roots |= KVM_MMU_ROOT_CURRENT;
 
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
-                   pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
-                       if (mmu->invlpg)
-                               mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
-                       tlb_flush = true;
-               }
+                   pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd))
+                       roots |= KVM_MMU_ROOT_PREVIOUS(i);
        }
 
-       if (tlb_flush)
-               static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
-
+       if (roots)
+               kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots);
        ++vcpu->stat.invlpg;
 
        /*
@@ -5814,29 +5919,30 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
 
 /* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_level_handler) (struct kvm *kvm,
+typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
                                    struct kvm_rmap_head *rmap_head,
                                    const struct kvm_memory_slot *slot);
 
-/* The caller should hold mmu-lock before calling this function. */
-static __always_inline bool
-slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
-                       slot_level_handler fn, int start_level, int end_level,
-                       gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
-                       bool flush)
+static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
+                                             const struct kvm_memory_slot *slot,
+                                             slot_rmaps_handler fn,
+                                             int start_level, int end_level,
+                                             gfn_t start_gfn, gfn_t end_gfn,
+                                             bool flush_on_yield, bool flush)
 {
        struct slot_rmap_walk_iterator iterator;
 
-       for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
                        end_gfn, &iterator) {
                if (iterator.rmap)
-                       flush |= fn(kvm, iterator.rmap, memslot);
+                       flush |= fn(kvm, iterator.rmap, slot);
 
                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
                        if (flush && flush_on_yield) {
-                               kvm_flush_remote_tlbs_with_address(kvm,
-                                               start_gfn,
-                                               iterator.gfn - start_gfn + 1);
+                               kvm_flush_remote_tlbs_range(kvm, start_gfn,
+                                                           iterator.gfn - start_gfn + 1);
                                flush = false;
                        }
                        cond_resched_rwlock_write(&kvm->mmu_lock);
@@ -5846,23 +5952,23 @@ slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
        return flush;
 }
 
-static __always_inline bool
-slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
-                 slot_level_handler fn, int start_level, int end_level,
-                 bool flush_on_yield)
+static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
+                                           const struct kvm_memory_slot *slot,
+                                           slot_rmaps_handler fn,
+                                           int start_level, int end_level,
+                                           bool flush_on_yield)
 {
-       return slot_handle_level_range(kvm, memslot, fn, start_level,
-                       end_level, memslot->base_gfn,
-                       memslot->base_gfn + memslot->npages - 1,
-                       flush_on_yield, false);
+       return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
+                                slot->base_gfn, slot->base_gfn + slot->npages - 1,
+                                flush_on_yield, false);
 }
 
-static __always_inline bool
-slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot,
-                    slot_level_handler fn, bool flush_on_yield)
+static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
+                                              const struct kvm_memory_slot *slot,
+                                              slot_rmaps_handler fn,
+                                              bool flush_on_yield)
 {
-       return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
-                                PG_LEVEL_4K, flush_on_yield);
+       return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
 }
 
 static void free_mmu_pages(struct kvm_mmu *mmu)
@@ -6157,9 +6263,9 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
                        if (WARN_ON_ONCE(start >= end))
                                continue;
 
-                       flush = slot_handle_level_range(kvm, memslot, __kvm_zap_rmap,
-                                                       PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
-                                                       start, end - 1, true, flush);
+                       flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
+                                                 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+                                                 start, end - 1, true, flush);
                }
        }
 
@@ -6191,8 +6297,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
        }
 
        if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
-                                                  gfn_end - gfn_start);
+               kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
 
        kvm_mmu_invalidate_end(kvm, 0, -1ul);
 
@@ -6212,8 +6317,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 {
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
-               slot_handle_level(kvm, memslot, slot_rmap_write_protect,
-                                 start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+               walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
+                               start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
                write_unlock(&kvm->mmu_lock);
        }
 
@@ -6448,10 +6553,9 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
         * all the way to the target level. There's no need to split pages
         * already at the target level.
         */
-       for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--) {
-               slot_handle_level_range(kvm, slot, shadow_mmu_try_split_huge_pages,
-                                       level, level, start, end - 1, true, false);
-       }
+       for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
+               __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
+                                 level, level, start, end - 1, true, false);
 }
 
 /* Must be called with the mmu_lock held in write-mode. */
@@ -6530,7 +6634,7 @@ restart:
                                                               PG_LEVEL_NUM)) {
                        kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
 
-                       if (kvm_available_flush_tlb_with_range())
+                       if (kvm_available_flush_remote_tlbs_range())
                                kvm_flush_remote_tlbs_sptep(kvm, sptep);
                        else
                                need_tlb_flush = 1;
@@ -6549,8 +6653,8 @@ static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
         * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap
         * pages that are already mapped at the maximum hugepage level.
         */
-       if (slot_handle_level(kvm, slot, kvm_mmu_zap_collapsible_spte,
-                             PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
+       if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
+                           PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
                kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 }
 
@@ -6581,8 +6685,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
         * is observed by any other operation on the same memslot.
         */
        lockdep_assert_held(&kvm->slots_lock);
-       kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-                                          memslot->npages);
+       kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
 }
 
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
@@ -6594,7 +6697,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
                 * support dirty logging at a 4k granularity.
                 */
-               slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
+               walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
                write_unlock(&kvm->mmu_lock);
        }
 
@@ -6664,8 +6767,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
        }
 }
 
-static unsigned long
-mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long mmu_shrink_scan(struct shrinker *shrink,
+                                    struct shrink_control *sc)
 {
        struct kvm *kvm;
        int nr_to_scan = sc->nr_to_scan;
@@ -6723,8 +6826,8 @@ unlock:
        return freed;
 }
 
-static unsigned long
-mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long mmu_shrink_count(struct shrinker *shrink,
+                                     struct shrink_control *sc)
 {
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
index 2cbb155c686c7dea434d2f9a5d1eed1da9be5c0c..d39af5639ce97a6a31781003a6636edb16569ec2 100644 (file)
@@ -170,14 +170,14 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn,
                                    int min_level);
 
-void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
-                                       u64 start_gfn, u64 pages);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
+                                gfn_t nr_pages);
 
 /* Flush the given page (huge or not) of guest memory. */
 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
 {
-       kvm_flush_remote_tlbs_with_address(kvm, gfn_round_for_level(gfn, level),
-                                          KVM_PAGES_PER_HPAGE(level));
+       kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
+                                   KVM_PAGES_PER_HPAGE(level));
 }
 
 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
index a056f2773dd9014c200afdc60b354e59d8c7589d..0662e0278e706c742e3468c0bbdec9823766a8da 100644 (file)
@@ -324,7 +324,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
        trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
        walker->level = mmu->cpu_role.base.level;
-       pte           = mmu->get_guest_pgd(vcpu);
+       pte           = kvm_mmu_get_guest_pgd(vcpu, mmu);
        have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
 
 #if PTTYPE == 64
@@ -519,7 +519,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 
 static bool
 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-                    u64 *spte, pt_element_t gpte, bool no_dirty_log)
+                    u64 *spte, pt_element_t gpte)
 {
        struct kvm_memory_slot *slot;
        unsigned pte_access;
@@ -535,8 +535,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        pte_access = sp->role.access & FNAME(gpte_access)(gpte);
        FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-       slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn,
-                       no_dirty_log && (pte_access & ACC_WRITE_MASK));
+       slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK);
        if (!slot)
                return false;
 
@@ -605,7 +604,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                if (is_shadow_present_pte(*spte))
                        continue;
 
-               if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
+               if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i]))
                        break;
        }
 }
@@ -846,64 +845,6 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
 }
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
-{
-       struct kvm_shadow_walk_iterator iterator;
-       struct kvm_mmu_page *sp;
-       u64 old_spte;
-       int level;
-       u64 *sptep;
-
-       vcpu_clear_mmio_info(vcpu, gva);
-
-       /*
-        * No need to check return value here, rmap_can_add() can
-        * help us to skip pte prefetch later.
-        */
-       mmu_topup_memory_caches(vcpu, true);
-
-       if (!VALID_PAGE(root_hpa)) {
-               WARN_ON(1);
-               return;
-       }
-
-       write_lock(&vcpu->kvm->mmu_lock);
-       for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
-               level = iterator.level;
-               sptep = iterator.sptep;
-
-               sp = sptep_to_sp(sptep);
-               old_spte = *sptep;
-               if (is_last_spte(old_spte, level)) {
-                       pt_element_t gpte;
-                       gpa_t pte_gpa;
-
-                       if (!sp->unsync)
-                               break;
-
-                       pte_gpa = FNAME(get_level1_sp_gpa)(sp);
-                       pte_gpa += spte_index(sptep) * sizeof(pt_element_t);
-
-                       mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
-                       if (is_shadow_present_pte(old_spte))
-                               kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
-
-                       if (!rmap_can_add(vcpu))
-                               break;
-
-                       if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
-                                                      sizeof(pt_element_t)))
-                               break;
-
-                       FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false);
-               }
-
-               if (!sp->unsync_children)
-                       break;
-       }
-       write_unlock(&vcpu->kvm->mmu_lock);
-}
-
 /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               gpa_t addr, u64 access,
@@ -936,114 +877,75 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
  *   can't change unless all sptes pointing to it are nuked first.
  *
  * Returns
- * < 0: the sp should be zapped
- *   0: the sp is synced and no tlb flushing is required
- * > 0: the sp is synced and tlb flushing is required
+ * < 0: failed to sync spte
+ *   0: the spte is synced and no tlb flushing is required
+ * > 0: the spte is synced and tlb flushing is required
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
 {
-       union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
-       int i;
        bool host_writable;
        gpa_t first_pte_gpa;
-       bool flush = false;
-
-       /*
-        * Ignore various flags when verifying that it's safe to sync a shadow
-        * page using the current MMU context.
-        *
-        *  - level: not part of the overall MMU role and will never match as the MMU's
-        *           level tracks the root level
-        *  - access: updated based on the new guest PTE
-        *  - quadrant: not part of the overall MMU role (similar to level)
-        */
-       const union kvm_mmu_page_role sync_role_ign = {
-               .level = 0xf,
-               .access = 0x7,
-               .quadrant = 0x3,
-               .passthrough = 0x1,
-       };
+       u64 *sptep, spte;
+       struct kvm_memory_slot *slot;
+       unsigned pte_access;
+       pt_element_t gpte;
+       gpa_t pte_gpa;
+       gfn_t gfn;
 
-       /*
-        * Direct pages can never be unsync, and KVM should never attempt to
-        * sync a shadow page for a different MMU context, e.g. if the role
-        * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
-        * reserved bits checks will be wrong, etc...
-        */
-       if (WARN_ON_ONCE(sp->role.direct ||
-                        (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
-               return -1;
+       if (WARN_ON_ONCE(!sp->spt[i]))
+               return 0;
 
        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
+       pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-       for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
-               u64 *sptep, spte;
-               struct kvm_memory_slot *slot;
-               unsigned pte_access;
-               pt_element_t gpte;
-               gpa_t pte_gpa;
-               gfn_t gfn;
-
-               if (!sp->spt[i])
-                       continue;
-
-               pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
-
-               if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
-                                              sizeof(pt_element_t)))
-                       return -1;
-
-               if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-                       flush = true;
-                       continue;
-               }
-
-               gfn = gpte_to_gfn(gpte);
-               pte_access = sp->role.access;
-               pte_access &= FNAME(gpte_access)(gpte);
-               FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
-
-               if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
-                       continue;
+       if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
+                                      sizeof(pt_element_t)))
+               return -1;
 
-               /*
-                * Drop the SPTE if the new protections would result in a RWX=0
-                * SPTE or if the gfn is changing.  The RWX=0 case only affects
-                * EPT with execute-only support, i.e. EPT without an effective
-                * "present" bit, as all other paging modes will create a
-                * read-only SPTE if pte_access is zero.
-                */
-               if ((!pte_access && !shadow_present_mask) ||
-                   gfn != kvm_mmu_page_get_gfn(sp, i)) {
-                       drop_spte(vcpu->kvm, &sp->spt[i]);
-                       flush = true;
-                       continue;
-               }
+       if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
+               return 1;
 
-               /* Update the shadowed access bits in case they changed. */
-               kvm_mmu_page_set_access(sp, i, pte_access);
+       gfn = gpte_to_gfn(gpte);
+       pte_access = sp->role.access;
+       pte_access &= FNAME(gpte_access)(gpte);
+       FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-               sptep = &sp->spt[i];
-               spte = *sptep;
-               host_writable = spte & shadow_host_writable_mask;
-               slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-               make_spte(vcpu, sp, slot, pte_access, gfn,
-                         spte_to_pfn(spte), spte, true, false,
-                         host_writable, &spte);
+       if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
+               return 0;
 
-               flush |= mmu_spte_update(sptep, spte);
+       /*
+        * Drop the SPTE if the new protections would result in a RWX=0
+        * SPTE or if the gfn is changing.  The RWX=0 case only affects
+        * EPT with execute-only support, i.e. EPT without an effective
+        * "present" bit, as all other paging modes will create a
+        * read-only SPTE if pte_access is zero.
+        */
+       if ((!pte_access && !shadow_present_mask) ||
+           gfn != kvm_mmu_page_get_gfn(sp, i)) {
+               drop_spte(vcpu->kvm, &sp->spt[i]);
+               return 1;
        }
-
        /*
-        * Note, any flush is purely for KVM's correctness, e.g. when dropping
-        * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
-        * unmap or dirty logging event doesn't fail to flush.  The guest is
-        * responsible for flushing the TLB to ensure any changes in protection
-        * bits are recognized, i.e. until the guest flushes or page faults on
-        * a relevant address, KVM is architecturally allowed to let vCPUs use
-        * cached translations with the old protection bits.
+        * Do nothing if the permissions are unchanged.  The existing SPTE is
+        * still valid, and prefetch_invalid_gpte() has verified that the A/D bits
+        * are set in the "new" gPTE, i.e. there is no danger of missing an A/D
+        * update due to A/D bits being set in the SPTE but not the gPTE.
         */
-       return flush;
+       if (kvm_mmu_page_get_access(sp, i) == pte_access)
+               return 0;
+
+       /* Update the shadowed access bits in case they changed. */
+       kvm_mmu_page_set_access(sp, i, pte_access);
+
+       sptep = &sp->spt[i];
+       spte = *sptep;
+       host_writable = spte & shadow_host_writable_mask;
+       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+       make_spte(vcpu, sp, slot, pte_access, gfn,
+                 spte_to_pfn(spte), spte, true, false,
+                 host_writable, &spte);
+
+       return mmu_spte_update(sptep, spte);
 }
 
 #undef pt_element_t
index c15bfca3ed153876d657401d79600e17b9684088..cf2c6426a6fc36a27c542c0f3e219db45d08aa94 100644 (file)
@@ -164,7 +164,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        /*
         * For simplicity, enforce the NX huge page mitigation even if not
         * strictly necessary.  KVM could ignore the mitigation if paging is
-        * disabled in the guest, as the guest doesn't have an page tables to
+        * disabled in the guest, as the guest doesn't have any page tables to
         * abuse.  But to safely ignore the mitigation, KVM would have to
         * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG
         * is toggled on, and that's a net negative for performance when TDP is
index f0af385c56e035e74ce9e0d01ed2f0d2ce4c5c20..fae559559a806a8121760a637430b858ec5bacab 100644 (file)
@@ -29,29 +29,49 @@ static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
        WRITE_ONCE(*rcu_dereference(sptep), new_spte);
 }
 
+/*
+ * SPTEs must be modified atomically if they are shadow-present, leaf
+ * SPTEs, and have volatile bits, i.e. has bits that can be set outside
+ * of mmu_lock.  The Writable bit can be set by KVM's fast page fault
+ * handler, and Accessed and Dirty bits can be set by the CPU.
+ *
+ * Note, non-leaf SPTEs do have Accessed bits and those bits are
+ * technically volatile, but KVM doesn't consume the Accessed bit of
+ * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit.  This
+ * logic needs to be reassessed if KVM were to use non-leaf Accessed
+ * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
+ */
+static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level)
+{
+       return is_shadow_present_pte(old_spte) &&
+              is_last_spte(old_spte, level) &&
+              spte_has_volatile_bits(old_spte);
+}
+
 static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
                                         u64 new_spte, int level)
 {
-       /*
-        * Atomically write the SPTE if it is a shadow-present, leaf SPTE with
-        * volatile bits, i.e. has bits that can be set outside of mmu_lock.
-        * The Writable bit can be set by KVM's fast page fault handler, and
-        * Accessed and Dirty bits can be set by the CPU.
-        *
-        * Note, non-leaf SPTEs do have Accessed bits and those bits are
-        * technically volatile, but KVM doesn't consume the Accessed bit of
-        * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit.  This
-        * logic needs to be reassessed if KVM were to use non-leaf Accessed
-        * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
-        */
-       if (is_shadow_present_pte(old_spte) && is_last_spte(old_spte, level) &&
-           spte_has_volatile_bits(old_spte))
+       if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level))
                return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);
 
        __kvm_tdp_mmu_write_spte(sptep, new_spte);
        return old_spte;
 }
 
+static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
+                                         u64 mask, int level)
+{
+       atomic64_t *sptep_atomic;
+
+       if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
+               sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
+               return (u64)atomic64_fetch_and(~mask, sptep_atomic);
+       }
+
+       __kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
+       return old_spte;
+}
+
 /*
  * A TDP iterator performs a pre-order walk over a TDP paging structure.
  */
index 7c25dbf32eccb5b6b22937de461b08638c2d3f03..b2fca11b91fff89b9a746d43705da5d04dfed1c3 100644 (file)
@@ -334,35 +334,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared);
 
-static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
-{
-       if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
-               return;
-
-       if (is_accessed_spte(old_spte) &&
-           (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
-            spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
-               kvm_set_pfn_accessed(spte_to_pfn(old_spte));
-}
-
-static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
-                                         u64 old_spte, u64 new_spte, int level)
-{
-       bool pfn_changed;
-       struct kvm_memory_slot *slot;
-
-       if (level > PG_LEVEL_4K)
-               return;
-
-       pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
-
-       if ((!is_writable_pte(old_spte) || pfn_changed) &&
-           is_writable_pte(new_spte)) {
-               slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
-               mark_page_dirty_in_slot(kvm, slot, gfn);
-       }
-}
-
 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        kvm_account_pgtable_pages((void *)sp->spt, +1);
@@ -505,7 +476,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 }
 
 /**
- * __handle_changed_spte - handle bookkeeping associated with an SPTE change
+ * handle_changed_spte - handle bookkeeping associated with an SPTE change
  * @kvm: kvm instance
  * @as_id: the address space of the paging structure the SPTE was a part of
  * @gfn: the base GFN that was mapped by the SPTE
@@ -516,12 +487,13 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
  *         the MMU lock and the operation must synchronize with other
  *         threads that might be modifying SPTEs.
  *
- * Handle bookkeeping that might result from the modification of a SPTE.
- * This function must be called for all TDP SPTE modifications.
+ * Handle bookkeeping that might result from the modification of a SPTE.  Note,
+ * dirty logging updates are handled in common code, not here (see make_spte()
+ * and fast_pf_fix_direct_spte()).
  */
-static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-                                 u64 old_spte, u64 new_spte, int level,
-                                 bool shared)
+static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
+                               u64 old_spte, u64 new_spte, int level,
+                               bool shared)
 {
        bool was_present = is_shadow_present_pte(old_spte);
        bool is_present = is_shadow_present_pte(new_spte);
@@ -605,17 +577,10 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
        if (was_present && !was_leaf &&
            (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
                handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
-}
 
-static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-                               u64 old_spte, u64 new_spte, int level,
-                               bool shared)
-{
-       __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
-                             shared);
-       handle_changed_spte_acc_track(old_spte, new_spte, level);
-       handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
-                                     new_spte, level);
+       if (was_leaf && is_accessed_spte(old_spte) &&
+           (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
+               kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 }
 
 /*
@@ -658,9 +623,8 @@ static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
        if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
                return -EBUSY;
 
-       __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
-                             new_spte, iter->level, true);
-       handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
+       handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+                           new_spte, iter->level, true);
 
        return 0;
 }
@@ -696,7 +660,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
 
 
 /*
- * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
+ * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
  * @kvm:             KVM instance
  * @as_id:           Address space ID, i.e. regular vs. SMM
  * @sptep:           Pointer to the SPTE
@@ -704,23 +668,12 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
  * @new_spte:        The new value that will be set for the SPTE
  * @gfn:             The base GFN that was (or will be) mapped by the SPTE
  * @level:           The level _containing_ the SPTE (its parent PT's level)
- * @record_acc_track: Notify the MM subsystem of changes to the accessed state
- *                   of the page. Should be set unless handling an MMU
- *                   notifier for access tracking. Leaving record_acc_track
- *                   unset in that case prevents page accesses from being
- *                   double counted.
- * @record_dirty_log: Record the page as dirty in the dirty bitmap if
- *                   appropriate for the change being made. Should be set
- *                   unless performing certain dirty logging operations.
- *                   Leaving record_dirty_log unset in that case prevents page
- *                   writes from being double counted.
  *
  * Returns the old SPTE value, which _may_ be different than @old_spte if the
  * SPTE had volatile bits.
  */
-static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
-                             u64 old_spte, u64 new_spte, gfn_t gfn, int level,
-                             bool record_acc_track, bool record_dirty_log)
+static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
+                           u64 old_spte, u64 new_spte, gfn_t gfn, int level)
 {
        lockdep_assert_held_write(&kvm->mmu_lock);
 
@@ -735,46 +688,17 @@ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
        old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
 
-       __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
-
-       if (record_acc_track)
-               handle_changed_spte_acc_track(old_spte, new_spte, level);
-       if (record_dirty_log)
-               handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
-                                             new_spte, level);
+       handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
        return old_spte;
 }
 
-static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
-                                    u64 new_spte, bool record_acc_track,
-                                    bool record_dirty_log)
+static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
+                                        u64 new_spte)
 {
        WARN_ON_ONCE(iter->yielded);
-
-       iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
-                                           iter->old_spte, new_spte,
-                                           iter->gfn, iter->level,
-                                           record_acc_track, record_dirty_log);
-}
-
-static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
-                                   u64 new_spte)
-{
-       _tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
-}
-
-static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
-                                                struct tdp_iter *iter,
-                                                u64 new_spte)
-{
-       _tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
-}
-
-static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
-                                                struct tdp_iter *iter,
-                                                u64 new_spte)
-{
-       _tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
+       iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
+                                         iter->old_spte, new_spte,
+                                         iter->gfn, iter->level);
 }
 
 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
@@ -866,7 +790,7 @@ retry:
                        continue;
 
                if (!shared)
-                       tdp_mmu_set_spte(kvm, &iter, 0);
+                       tdp_mmu_iter_set_spte(kvm, &iter, 0);
                else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
                        goto retry;
        }
@@ -923,8 +847,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
        if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
                return false;
 
-       __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
-                          sp->gfn, sp->role.level + 1, true, true);
+       tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
+                        sp->gfn, sp->role.level + 1);
 
        return true;
 }
@@ -958,7 +882,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
 
-               tdp_mmu_set_spte(kvm, &iter, 0);
+               tdp_mmu_iter_set_spte(kvm, &iter, 0);
                flush = true;
        }
 
@@ -1128,7 +1052,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
                if (ret)
                        return ret;
        } else {
-               tdp_mmu_set_spte(kvm, iter, spte);
+               tdp_mmu_iter_set_spte(kvm, iter, spte);
        }
 
        tdp_account_mmu_page(kvm, sp);
@@ -1262,33 +1186,42 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
 /*
  * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
  * if any of the GFNs in the range have been accessed.
+ *
+ * No need to mark the corresponding PFN as accessed as this call is coming
+ * from the clear_young() or clear_flush_young() notifier, which uses the
+ * return value to determine if the page has been accessed.
  */
 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
                          struct kvm_gfn_range *range)
 {
-       u64 new_spte = 0;
+       u64 new_spte;
 
        /* If we have a non-accessed entry we don't need to change the pte. */
        if (!is_accessed_spte(iter->old_spte))
                return false;
 
-       new_spte = iter->old_spte;
-
-       if (spte_ad_enabled(new_spte)) {
-               new_spte &= ~shadow_accessed_mask;
+       if (spte_ad_enabled(iter->old_spte)) {
+               iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
+                                                        iter->old_spte,
+                                                        shadow_accessed_mask,
+                                                        iter->level);
+               new_spte = iter->old_spte & ~shadow_accessed_mask;
        } else {
                /*
                 * Capture the dirty status of the page, so that it doesn't get
                 * lost when the SPTE is marked for access tracking.
                 */
-               if (is_writable_pte(new_spte))
-                       kvm_set_pfn_dirty(spte_to_pfn(new_spte));
+               if (is_writable_pte(iter->old_spte))
+                       kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
 
-               new_spte = mark_spte_for_access_track(new_spte);
+               new_spte = mark_spte_for_access_track(iter->old_spte);
+               iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
+                                                       iter->old_spte, new_spte,
+                                                       iter->level);
        }
 
-       tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
-
+       trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
+                                      iter->old_spte, new_spte);
        return true;
 }
 
@@ -1324,15 +1257,15 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
         * Note, when changing a read-only SPTE, it's not strictly necessary to
         * zero the SPTE before setting the new PFN, but doing so preserves the
         * invariant that the PFN of a present leaf SPTE can never change.
-        * See __handle_changed_spte().
+        * See handle_changed_spte().
         */
-       tdp_mmu_set_spte(kvm, iter, 0);
+       tdp_mmu_iter_set_spte(kvm, iter, 0);
 
        if (!pte_write(range->pte)) {
                new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
                                                                  pte_pfn(range->pte));
 
-               tdp_mmu_set_spte(kvm, iter, new_spte);
+               tdp_mmu_iter_set_spte(kvm, iter, new_spte);
        }
 
        return true;
@@ -1349,7 +1282,7 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
        /*
         * No need to handle the remote TLB flush under RCU protection, the
         * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
-        * shadow page.  See the WARN on pfn_changed in __handle_changed_spte().
+        * shadow page. See the WARN on pfn_changed in handle_changed_spte().
         */
        return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
 }
@@ -1607,8 +1540,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                           gfn_t start, gfn_t end)
 {
+       u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
        struct tdp_iter iter;
-       u64 new_spte;
        bool spte_set = false;
 
        rcu_read_lock();
@@ -1621,19 +1554,13 @@ retry:
                if (!is_shadow_present_pte(iter.old_spte))
                        continue;
 
-               if (spte_ad_need_write_protect(iter.old_spte)) {
-                       if (is_writable_pte(iter.old_spte))
-                               new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
-                       else
-                               continue;
-               } else {
-                       if (iter.old_spte & shadow_dirty_mask)
-                               new_spte = iter.old_spte & ~shadow_dirty_mask;
-                       else
-                               continue;
-               }
+               MMU_WARN_ON(kvm_ad_enabled() &&
+                           spte_ad_need_write_protect(iter.old_spte));
 
-               if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
+               if (!(iter.old_spte & dbit))
+                       continue;
+
+               if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
                        goto retry;
 
                spte_set = true;
@@ -1675,8 +1602,9 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn, unsigned long mask, bool wrprot)
 {
+       u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
+                                                  shadow_dirty_mask;
        struct tdp_iter iter;
-       u64 new_spte;
 
        rcu_read_lock();
 
@@ -1685,25 +1613,26 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                if (!mask)
                        break;
 
+               MMU_WARN_ON(kvm_ad_enabled() &&
+                           spte_ad_need_write_protect(iter.old_spte));
+
                if (iter.level > PG_LEVEL_4K ||
                    !(mask & (1UL << (iter.gfn - gfn))))
                        continue;
 
                mask &= ~(1UL << (iter.gfn - gfn));
 
-               if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
-                       if (is_writable_pte(iter.old_spte))
-                               new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
-                       else
-                               continue;
-               } else {
-                       if (iter.old_spte & shadow_dirty_mask)
-                               new_spte = iter.old_spte & ~shadow_dirty_mask;
-                       else
-                               continue;
-               }
+               if (!(iter.old_spte & dbit))
+                       continue;
+
+               iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
+                                                       iter.old_spte, dbit,
+                                                       iter.level);
 
-               tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
+               trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
+                                              iter.old_spte,
+                                              iter.old_spte & ~dbit);
+               kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
        }
 
        rcu_read_unlock();
@@ -1821,7 +1750,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
                if (new_spte == iter.old_spte)
                        break;
 
-               tdp_mmu_set_spte(kvm, &iter, new_spte);
+               tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
                spte_set = true;
        }
 
index 597a8f8f90b940db1d10f943182d6478702656a3..1690d41c183085eb9433f69264ae9fd2066778c3 100644 (file)
@@ -543,9 +543,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
        if (!pmc)
                return 1;
 
-       if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
+       if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
            (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
-           (kvm_read_cr0(vcpu) & X86_CR0_PE))
+           kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
                return 1;
 
        *data = pmc_read_counter(pmc) & mask;
index 7584eb85410b01775b8ee91247149e1a94423baf..a64ede4f1d8a03f61805cacc8d01f73665dd0b43 100644 (file)
@@ -95,6 +95,7 @@ static const struct svm_direct_access_msrs {
 #endif
        { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
        { .index = MSR_IA32_PRED_CMD,                   .always = false },
+       { .index = MSR_IA32_FLUSH_CMD,                  .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
@@ -2869,32 +2870,10 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
        return 0;
 }
 
-static int svm_set_msr_ia32_cmd(struct kvm_vcpu *vcpu, struct msr_data *msr,
-                               bool guest_has_feat, u64 cmd,
-                               int x86_feature_bit)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-
-       if (!msr->host_initiated && !guest_has_feat)
-               return 1;
-
-       if (!(msr->data & ~cmd))
-               return 1;
-       if (!boot_cpu_has(x86_feature_bit))
-               return 1;
-       if (!msr->data)
-               return 0;
-
-       wrmsrl(msr->index, cmd);
-       set_msr_interception(vcpu, svm->msrpm, msr->index, 0, 1);
-
-       return 0;
-}
-
 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       int r;
+       int ret = 0;
 
        u32 ecx = msr->index;
        u64 data = msr->data;
@@ -2964,16 +2943,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                 */
                set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
                break;
-       case MSR_IA32_PRED_CMD:
-               r = svm_set_msr_ia32_cmd(vcpu, msr,
-                                        guest_has_pred_cmd_msr(vcpu),
-                                        PRED_CMD_IBPB, X86_FEATURE_IBPB);
-               break;
-       case MSR_IA32_FLUSH_CMD:
-               r = svm_set_msr_ia32_cmd(vcpu, msr,
-                                        guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D),
-                                        L1D_FLUSH, X86_FEATURE_FLUSH_L1D);
-               break;
        case MSR_AMD64_VIRT_SPEC_CTRL:
                if (!msr->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
@@ -3026,10 +2995,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                 * guest via direct_access_msrs, and switch it via user return.
                 */
                preempt_disable();
-               r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
+               ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
                preempt_enable();
-               if (r)
-                       return 1;
+               if (ret)
+                       break;
 
                svm->tsc_aux = data;
                break;
@@ -3087,7 +3056,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        default:
                return kvm_set_msr_common(vcpu, msr);
        }
-       return 0;
+       return ret;
 }
 
 static int msr_interception(struct kvm_vcpu *vcpu)
@@ -4168,6 +4137,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
        svm_recalc_instruction_intercepts(vcpu, svm);
 
+       if (boot_cpu_has(X86_FEATURE_IBPB))
+               set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
+                                    !!guest_has_pred_cmd_msr(vcpu));
+
+       if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
+               set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
+                                    !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
+
        /* For sev guests, the memory encryption bit is not reserved in CR3.  */
        if (sev_guest(vcpu->kvm)) {
                best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
@@ -4545,7 +4522,6 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
                                        void *insn, int insn_len)
 {
        bool smep, smap, is_user;
-       unsigned long cr4;
        u64 error_code;
 
        /* Emulation is always possible when KVM has access to all guest state. */
@@ -4637,9 +4613,8 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
        if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
                goto resume_guest;
 
-       cr4 = kvm_read_cr4(vcpu);
-       smep = cr4 & X86_CR4_SMEP;
-       smap = cr4 & X86_CR4_SMAP;
+       smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
+       smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
        is_user = svm_get_cpl(vcpu) == 3;
        if (smap && (!smep || is_user)) {
                pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
index cff838f15db5399b1fba358965e38fb8adcddea0..823001033539ce07374a99bc4575e8814722fc83 100644 (file)
@@ -35,9 +35,8 @@ static inline __init void svm_hv_hardware_setup(void)
        if (npt_enabled &&
            ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
                pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n");
-               svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
-               svm_x86_ops.tlb_remote_flush_with_range =
-                               hv_remote_flush_tlb_with_range;
+               svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
+               svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
        }
 
        if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
index f63b28f46a713313d911f643ea0dba342cf35240..d91c08f04dd5d1129dab8f6ed8da64af14c854ee 100644 (file)
@@ -358,6 +358,7 @@ static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
 static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
                                       gpa_t addr)
 {
+       unsigned long roots = 0;
        uint i;
        struct kvm_mmu_root_info *cached_root;
 
@@ -368,8 +369,10 @@ static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
 
                if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
                                            eptp))
-                       vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+                       roots |= KVM_MMU_ROOT_PREVIOUS(i);
        }
+       if (roots)
+               kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots);
 }
 
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
@@ -4481,7 +4484,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
         * CR0_GUEST_HOST_MASK is already set in the original vmcs01
         * (KVM doesn't change it);
         */
-       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
        vmx_set_cr0(vcpu, vmcs12->host_cr0);
 
        /* Same as above - no reason to call set_cr4_guest_host_mask().  */
@@ -4632,7 +4635,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
         */
        vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
 
-       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
        vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
 
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
@@ -5154,7 +5157,7 @@ static int handle_vmxon(struct kvm_vcpu *vcpu)
         * does force CR0.PE=1, but only to also force VM86 in order to emulate
         * Real Mode, and so there's no need to check CR0.PE manually.
         */
-       if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
+       if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }
index e89340dfa32293753cb7a7705a9fc7b9ad5ec0d3..599dd62eeb0e72a350b0307e3c1c7d5a8e0c967d 100644 (file)
@@ -164,6 +164,7 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
 static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
        MSR_IA32_SPEC_CTRL,
        MSR_IA32_PRED_CMD,
+       MSR_IA32_FLUSH_CMD,
        MSR_IA32_TSC,
 #ifdef CONFIG_X86_64
        MSR_FS_BASE,
@@ -2133,39 +2134,6 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
        return debugctl;
 }
 
-static int vmx_set_msr_ia32_cmd(struct kvm_vcpu *vcpu,
-                               struct msr_data *msr_info,
-                               bool guest_has_feat, u64 cmd,
-                               int x86_feature_bit)
-{
-       if (!msr_info->host_initiated && !guest_has_feat)
-               return 1;
-
-       if (!(msr_info->data & ~cmd))
-               return 1;
-       if (!boot_cpu_has(x86_feature_bit))
-               return 1;
-       if (!msr_info->data)
-               return 0;
-
-       wrmsrl(msr_info->index, cmd);
-
-       /*
-        * For non-nested:
-        * When it's written (to non-zero) for the first time, pass
-        * it through.
-        *
-        * For nested:
-        * The handling of the MSR bitmap for L2 guests is done in
-        * nested_vmx_prepare_msr_bitmap. We should not touch the
-        * vmcs02.msr_bitmap here since it gets completely overwritten
-        * in the merging.
-        */
-       vmx_disable_intercept_for_msr(vcpu, msr_info->index, MSR_TYPE_W);
-
-       return 0;
-}
-
 /*
  * Writes msr value into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
@@ -2318,18 +2286,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
                        return 1;
                goto find_uret_msr;
-       case MSR_IA32_PRED_CMD:
-               ret = vmx_set_msr_ia32_cmd(vcpu, msr_info,
-                                          guest_has_pred_cmd_msr(vcpu),
-                                          PRED_CMD_IBPB,
-                                          X86_FEATURE_IBPB);
-               break;
-       case MSR_IA32_FLUSH_CMD:
-               ret = vmx_set_msr_ia32_cmd(vcpu, msr_info,
-                                          guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D),
-                                          L1D_FLUSH,
-                                          X86_FEATURE_FLUSH_L1D);
-               break;
        case MSR_IA32_CR_PAT:
                if (!kvm_pat_valid(data))
                        return 1;
@@ -4790,7 +4746,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
        /* 22.2.1, 20.8.1 */
        vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-       vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
        vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
        set_cr4_guest_host_mask(vmx);
@@ -5180,7 +5136,7 @@ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
        if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
                return true;
 
-       return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
+       return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
               (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
 }
 
@@ -5517,7 +5473,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                break;
        case 3: /* lmsw */
                val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
-               trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
+               trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
                kvm_lmsw(vcpu, val);
 
                return kvm_skip_emulated_instruction(vcpu);
@@ -7575,7 +7531,7 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
                return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
 
-       if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
+       if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
                if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                        cache = MTRR_TYPE_WRBACK;
                else
@@ -7761,6 +7717,13 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
                                          !guest_cpuid_has(vcpu, X86_FEATURE_XFD));
 
+       if (boot_cpu_has(X86_FEATURE_IBPB))
+               vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
+                                         !guest_has_pred_cmd_msr(vcpu));
+
+       if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
+               vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
+                                         !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
 
        set_cr4_guest_host_mask(vmx);
 
@@ -8434,9 +8397,8 @@ static __init int hardware_setup(void)
 #if IS_ENABLED(CONFIG_HYPERV)
        if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
            && enable_ept) {
-               vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
-               vmx_x86_ops.tlb_remote_flush_with_range =
-                               hv_remote_flush_tlb_with_range;
+               vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
+               vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
        }
 #endif
 
index 2acdc54bc34b18bd71d3020b837617c7ad488dd9..9e66531861cf92f532ac555a8bc70970e2358f5a 100644 (file)
@@ -369,7 +369,7 @@ struct vcpu_vmx {
        struct lbr_desc lbr_desc;
 
        /* Save desired MSR intercept (read: pass-through) state */
-#define MAX_POSSIBLE_PASSTHROUGH_MSRS  15
+#define MAX_POSSIBLE_PASSTHROUGH_MSRS  16
        struct {
                DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
                DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -640,6 +640,24 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
                                (1 << VCPU_EXREG_EXIT_INFO_1) | \
                                (1 << VCPU_EXREG_EXIT_INFO_2))
 
+static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
+{
+       unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+
+       /*
+        * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
+        * in order to construct shadow PTEs with the correct protections.
+        * Note!  CR0.WP technically can be passed through to the guest if
+        * paging is disabled, but checking CR0.PG would generate a cyclical
+        * dependency of sorts due to forcing the caller to ensure CR0 holds
+        * the correct value prior to determining which CR0 bits can be owned
+        * by L1.  Keep it simple and limit the optimization to EPT.
+        */
+       if (!enable_ept)
+               bits &= ~X86_CR0_WP;
+       return bits;
+}
+
 static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
 {
        return container_of(kvm, struct kvm_vmx, kvm);
index a86ad45a53b881ad5aaca5522f57882c931c874c..095a41c6f346714cb4e2266feeb560211ab889aa 100644 (file)
@@ -194,7 +194,7 @@ bool __read_mostly eager_page_split = true;
 module_param(eager_page_split, bool, 0644);
 
 /* Enable/disable SMT_RSB bug mitigation */
-bool __read_mostly mitigate_smt_rsb;
+static bool __read_mostly mitigate_smt_rsb;
 module_param(mitigate_smt_rsb, bool, 0444);
 
 /*
@@ -802,8 +802,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
         */
        if ((fault->error_code & PFERR_PRESENT_MASK) &&
            !(fault->error_code & PFERR_RSVD_MASK))
-               kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
-                                      fault_mmu->root.hpa);
+               kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
+                                       KVM_MMU_ROOT_CURRENT);
 
        fault_mmu->inject_page_fault(vcpu, fault);
 }
@@ -841,7 +841,7 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
 
 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
 {
-       if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+       if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
                return true;
 
        kvm_queue_exception(vcpu, UD_VECTOR);
@@ -906,6 +906,24 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
 {
+       /*
+        * CR0.WP is incorporated into the MMU role, but only for non-nested,
+        * indirect shadow MMUs.  If paging is disabled, no updates are needed
+        * as there are no permission bits to emulate.  If TDP is enabled, the
+        * MMU's metadata needs to be updated, e.g. so that emulating guest
+        * translations does the right thing, but there's no need to unload the
+        * root as CR0.WP doesn't affect SPTEs.
+        */
+       if ((cr0 ^ old_cr0) == X86_CR0_WP) {
+               if (!(cr0 & X86_CR0_PG))
+                       return;
+
+               if (tdp_enabled) {
+                       kvm_init_mmu(vcpu);
+                       return;
+               }
+       }
+
        if ((cr0 ^ old_cr0) & X86_CR0_PG) {
                kvm_clear_async_pf_completion_queue(vcpu);
                kvm_async_pf_hash_reset(vcpu);
@@ -965,7 +983,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                return 1;
 
        if (!(cr0 & X86_CR0_PG) &&
-           (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
+           (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
                return 1;
 
        static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -987,7 +1005,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
        if (vcpu->arch.guest_state_protected)
                return;
 
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
@@ -1001,7 +1019,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
        if (static_cpu_has(X86_FEATURE_PKU) &&
            vcpu->arch.pkru != vcpu->arch.host_pkru &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-            kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
+            kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
                write_pkru(vcpu->arch.pkru);
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 }
@@ -1015,14 +1033,14 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (static_cpu_has(X86_FEATURE_PKU) &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-            kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
+            kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
                vcpu->arch.pkru = rdpkru();
                if (vcpu->arch.pkru != vcpu->arch.host_pkru)
                        write_pkru(vcpu->arch.host_pkru);
        }
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
@@ -1178,9 +1196,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                return 1;
 
        if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
-               if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
-                       return 1;
-
                /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
                if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
                        return 1;
@@ -1227,7 +1242,7 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
         * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
         * with PCIDE=0.
         */
-       if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+       if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
                return;
 
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
@@ -1242,9 +1257,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
        bool skip_tlb_flush = false;
        unsigned long pcid = 0;
 #ifdef CONFIG_X86_64
-       bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
-
-       if (pcid_enabled) {
+       if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
                skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
                cr3 &= ~X86_CR3_PCID_NOFLUSH;
                pcid = cr3 & X86_CR3_PCID_MASK;
@@ -3643,6 +3656,29 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.perf_capabilities = data;
                kvm_pmu_refresh(vcpu);
                break;
+       case MSR_IA32_PRED_CMD:
+               if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu))
+                       return 1;
+
+               if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
+                       return 1;
+               if (!data)
+                       break;
+
+               wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+               break;
+       case MSR_IA32_FLUSH_CMD:
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D))
+                       return 1;
+
+               if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
+                       return 1;
+               if (!data)
+                       break;
+
+               wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+               break;
        case MSR_EFER:
                return set_efer(vcpu, msr_info);
        case MSR_K7_HWCR:
@@ -5059,7 +5095,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
                return 0;
        if (mce->status & MCI_STATUS_UC) {
                if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
-                   !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
+                   !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
                        kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                        return 0;
                }
@@ -9817,7 +9853,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                vcpu->run->hypercall.args[0]  = gpa;
                vcpu->run->hypercall.args[1]  = npages;
                vcpu->run->hypercall.args[2]  = attrs;
-               vcpu->run->hypercall.longmode = op_64_bit;
+               vcpu->run->hypercall.flags    = 0;
+               if (op_64_bit)
+                       vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
+
+               WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
                vcpu->arch.complete_userspace_io = complete_hypercall_exit;
                return 0;
        }
@@ -13270,7 +13310,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
                return 1;
        }
 
-       pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+       pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
 
        switch (type) {
        case INVPCID_TYPE_INDIV_ADDR:
index 4bc483d082ee3b4189ebbde483ad7374e29835b6..fbef05c0bdeb5dab69a18ea114da177f7c851e1d 100644 (file)
@@ -136,15 +136,15 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
 
 static inline bool is_protmode(struct kvm_vcpu *vcpu)
 {
-       return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
+       return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
 }
 
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
+static inline bool is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-       return vcpu->arch.efer & EFER_LMA;
+       return !!(vcpu->arch.efer & EFER_LMA);
 #else
-       return 0;
+       return false;
 #endif
 }
 
@@ -184,19 +184,19 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
-static inline int is_pae(struct kvm_vcpu *vcpu)
+static inline bool is_pae(struct kvm_vcpu *vcpu)
 {
-       return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
+       return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
 }
 
-static inline int is_pse(struct kvm_vcpu *vcpu)
+static inline bool is_pse(struct kvm_vcpu *vcpu)
 {
-       return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
+       return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
 }
 
-static inline int is_paging(struct kvm_vcpu *vcpu)
+static inline bool is_paging(struct kvm_vcpu *vcpu)
 {
-       return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
+       return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
 }
 
 static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
@@ -206,7 +206,7 @@ static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
 
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
-       return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
+       return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
index 7316a822425992efec14fb5598d42f32eac43f6c..e91500a80963945c1558707404703cd69a045bb4 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/fixmap.h>
 #include <asm/desc.h>
 #include <asm/kasan.h>
+#include <asm/setup.h>
 
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
 
@@ -29,6 +30,12 @@ static __init void init_cea_offsets(void)
        unsigned int max_cea;
        unsigned int i, j;
 
+       if (!kaslr_enabled()) {
+               for_each_possible_cpu(i)
+                       per_cpu(_cea_offset, i) = i;
+               return;
+       }
+
        max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
 
        /* O(sodding terrible) */
index 88cccd65029dba414912656310c3e72313a4aa4b..c6efcf559d8821261cb1724bf45275101a351238 100644 (file)
@@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp)
        cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
                                     ((u64)bp->ext_cmd_line_ptr << 32));
 
-       cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+       if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+               return;
 
        if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
                sme_me_mask = me_mask;
index 3c5b52fbe4a7f9f69b98487d78c066f94a4064c9..a9ec8c9f5c5dd04fd7747d943b791adf9d9b1025 100644 (file)
@@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
 
-obj-$(CONFIG_XEN_PV_DOM0)      += vga.o
+obj-$(CONFIG_XEN_DOM0)         += vga.o
 
 obj-$(CONFIG_XEN_EFI)          += efi.o
index bb59cc6ddb2d425d4a782eb443969910e4508d83..093b78c8bbec0724e59947211b55e3eaae76cc8c 100644 (file)
@@ -1390,7 +1390,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
 
                x86_platform.set_legacy_features =
                                xen_dom0_set_legacy_features;
-               xen_init_vga(info, xen_start_info->console.dom0.info_size);
+               xen_init_vga(info, xen_start_info->console.dom0.info_size,
+                            &boot_params.screen_info);
                xen_start_info->console.domU.mfn = 0;
                xen_start_info->console.domU.evtchn = 0;
 
index bcae606bbc5cfd3145aefb4f3bf2c11b5e07afe0..ada3868c02c231d0f10863cabf71a076f003acb5 100644 (file)
@@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params)
        x86_init.oem.banner = xen_banner;
 
        xen_efi_init(boot_params);
+
+       if (xen_initial_domain()) {
+               struct xen_platform_op op = {
+                       .cmd = XENPF_get_dom0_console,
+               };
+               int ret = HYPERVISOR_platform_op(&op);
+
+               if (ret > 0)
+                       xen_init_vga(&op.u.dom0_console,
+                                    min(ret * sizeof(char),
+                                        sizeof(op.u.dom0_console)),
+                                    &boot_params->screen_info);
+       }
 }
 
 void __init mem_map_via_hcall(struct boot_params *boot_params_p)
index 1d597364b49dc3f9b7e8d259f1300181d4beab8e..b74ac2562cfbafddf4be10d53983515febbc7276 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/pvclock.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
+#include <asm/xen/cpuid.h>
 
 #include <xen/events.h>
 #include <xen/features.h>
@@ -503,11 +504,7 @@ static int __init xen_tsc_safe_clocksource(void)
        /* Leaf 4, sub-leaf 0 (0x40000x03) */
        cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx);
 
-       /* tsc_mode = no_emulate (2) */
-       if (ebx != 2)
-               return 0;
-
-       return 1;
+       return ebx == XEN_CPUID_TSC_MODE_NEVER_EMULATE;
 }
 
 static void __init xen_time_init(void)
index 14ea32e734d59315c5aa70e1ba484be3468a5550..d97adab8420f4c248011e87d7c43417ad3b2ca6e 100644 (file)
@@ -9,10 +9,9 @@
 
 #include "xen-ops.h"
 
-void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
+void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
+                        struct screen_info *screen_info)
 {
-       struct screen_info *screen_info = &boot_params.screen_info;
-
        /* This is drawn from a dump from vgacon:startup in
         * standard Linux. */
        screen_info->orig_video_mode = 3;
index 9a8bb972193d884e097adb1b58d71d7aa9d614e0..a10903785a33863c358de6d73ede4dd38a82be80 100644 (file)
@@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu)
 
 struct dom0_vga_console_info;
 
-#ifdef CONFIG_XEN_PV_DOM0
-void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+#ifdef CONFIG_XEN_DOM0
+void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
+                        struct screen_info *);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
-                                      size_t size)
+                                      size_t size, struct screen_info *si)
 {
 }
 #endif
index 5d9d9c84d51657f1c6d9e5b25bd3cac406308d76..941b2dca70db7337122df8ad9cdd1fb30bdeed1a 100644 (file)
@@ -204,9 +204,6 @@ config BLK_INLINE_ENCRYPTION_FALLBACK
 
 source "block/partitions/Kconfig"
 
-config BLOCK_COMPAT
-       def_bool COMPAT
-
 config BLK_MQ_PCI
        def_bool PCI
 
index 9e5e0277a4d95a5e2f09b6cafaf4d8cd97ee5983..42926e6cb83c8e328cfd1e904989430737cc9326 100644 (file)
@@ -959,16 +959,11 @@ again:
        }
 }
 
-unsigned long bdev_start_io_acct(struct block_device *bdev,
-                                unsigned int sectors, enum req_op op,
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
                                 unsigned long start_time)
 {
-       const int sgrp = op_stat_group(op);
-
        part_stat_lock();
        update_io_ticks(bdev, start_time, false);
-       part_stat_inc(bdev, ios[sgrp]);
-       part_stat_add(bdev, sectors[sgrp], sectors);
        part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
        part_stat_unlock();
 
@@ -984,13 +979,12 @@ EXPORT_SYMBOL(bdev_start_io_acct);
  */
 unsigned long bio_start_io_acct(struct bio *bio)
 {
-       return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
-                                 bio_op(bio), jiffies);
+       return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
 }
 EXPORT_SYMBOL_GPL(bio_start_io_acct);
 
 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
-                     unsigned long start_time)
+                     unsigned int sectors, unsigned long start_time)
 {
        const int sgrp = op_stat_group(op);
        unsigned long now = READ_ONCE(jiffies);
@@ -998,6 +992,8 @@ void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
 
        part_stat_lock();
        update_io_ticks(bdev, now, true);
+       part_stat_inc(bdev, ios[sgrp]);
+       part_stat_add(bdev, sectors[sgrp], sectors);
        part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
        part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
        part_stat_unlock();
@@ -1007,7 +1003,7 @@ EXPORT_SYMBOL(bdev_end_io_acct);
 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
                              struct block_device *orig_bdev)
 {
-       bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
+       bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
 }
 EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
 
index d0cb2ef18fe21dfa07fe4eec5c76bde6d6286f05..cf1a39adf9a5b9533e057dd90593f3c203fbd7fc 100644 (file)
@@ -2725,6 +2725,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
        struct blk_mq_hw_ctx *this_hctx = NULL;
        struct blk_mq_ctx *this_ctx = NULL;
        struct request *requeue_list = NULL;
+       struct request **requeue_lastp = &requeue_list;
        unsigned int depth = 0;
        LIST_HEAD(list);
 
@@ -2735,10 +2736,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
                        this_hctx = rq->mq_hctx;
                        this_ctx = rq->mq_ctx;
                } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
-                       rq_list_add(&requeue_list, rq);
+                       rq_list_add_tail(&requeue_lastp, rq);
                        continue;
                }
-               list_add_tail(&rq->queuelist, &list);
+               list_add(&rq->queuelist, &list);
                depth++;
        } while (!rq_list_empty(plug->mq_list));
 
index ef59fee62780d301d4756000e660464078a6eaa2..a7482d2cc82e721a4c1acf8a0884b22ae9e195ac 100644 (file)
@@ -378,12 +378,13 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)        \
 do {                                                           \
        if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {          \
+               struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
                int srcu_idx;                                   \
                                                                \
                might_sleep_if(check_sleep);                    \
-               srcu_idx = srcu_read_lock((q)->tag_set->srcu);  \
+               srcu_idx = srcu_read_lock(__tag_set->srcu);     \
                (dispatch_ops);                                 \
-               srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \
+               srcu_read_unlock(__tag_set->srcu, srcu_idx);    \
        } else {                                                \
                rcu_read_lock();                                \
                (dispatch_ops);                                 \
index 4fa769c4bcdb78c46c9fb1e124736ae39a32fbec..f0d4ff3c20a83275d5161e8e5c8d87793aa46ae7 100644 (file)
@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
                }
 
                if (sinfo->msgdigest_len != sig->digest_size) {
-                       pr_debug("Sig %u: Invalid digest size (%u)\n",
-                                sinfo->index, sinfo->msgdigest_len);
+                       pr_warn("Sig %u: Invalid digest size (%u)\n",
+                               sinfo->index, sinfo->msgdigest_len);
                        ret = -EBADMSG;
                        goto error;
                }
 
                if (memcmp(sig->digest, sinfo->msgdigest,
                           sinfo->msgdigest_len) != 0) {
-                       pr_debug("Sig %u: Message digest doesn't match\n",
-                                sinfo->index);
+                       pr_warn("Sig %u: Message digest doesn't match\n",
+                               sinfo->index);
                        ret = -EKEYREJECTED;
                        goto error;
                }
@@ -478,7 +478,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
                               const void *data, size_t datalen)
 {
        if (pkcs7->data) {
-               pr_debug("Data already supplied\n");
+               pr_warn("Data already supplied\n");
                return -EINVAL;
        }
        pkcs7->data = data;
index 7553ab18db898ffd887b7a7c487506780eb409e6..22beaf2213a224da9ca8f8fdf3519b071ac64a95 100644 (file)
@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
                break;
 
        default:
-               pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
+               pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
                return -ELIBBAD;
        }
 
@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
        ctx->certs_size = ddir->certs.size;
 
        if (!ddir->certs.virtual_address || !ddir->certs.size) {
-               pr_debug("Unsigned PE binary\n");
+               pr_warn("Unsigned PE binary\n");
                return -ENODATA;
        }
 
@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
        unsigned len;
 
        if (ctx->sig_len < sizeof(wrapper)) {
-               pr_debug("Signature wrapper too short\n");
+               pr_warn("Signature wrapper too short\n");
                return -ELIBBAD;
        }
 
@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
        pr_debug("sig wrapper = { %x, %x, %x }\n",
                 wrapper.length, wrapper.revision, wrapper.cert_type);
 
-       /* Both pesign and sbsign round up the length of certificate table
-        * (in optional header data directories) to 8 byte alignment.
+       /* sbsign rounds up the length of certificate table (in optional
+        * header data directories) to 8 byte alignment.  However, the PE
+        * specification states that while entries are 8-byte aligned, this is
+        * not included in their length, and as a result, pesign has not
+        * rounded up since 0.110.
         */
-       if (round_up(wrapper.length, 8) != ctx->sig_len) {
-               pr_debug("Signature wrapper len wrong\n");
+       if (wrapper.length > ctx->sig_len) {
+               pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
+                       wrapper.length, ctx->sig_len);
                return -ELIBBAD;
        }
        if (wrapper.revision != WIN_CERT_REVISION_2_0) {
-               pr_debug("Signature is not revision 2.0\n");
+               pr_warn("Signature is not revision 2.0\n");
                return -ENOTSUPP;
        }
        if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
-               pr_debug("Signature certificate type is not PKCS\n");
+               pr_warn("Signature certificate type is not PKCS\n");
                return -ENOTSUPP;
        }
 
@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
        ctx->sig_offset += sizeof(wrapper);
        ctx->sig_len -= sizeof(wrapper);
        if (ctx->sig_len < 4) {
-               pr_debug("Signature data missing\n");
+               pr_warn("Signature data missing\n");
                return -EKEYREJECTED;
        }
 
@@ -194,7 +198,7 @@ check_len:
                return 0;
        }
 not_pkcs7:
-       pr_debug("Signature data not PKCS#7\n");
+       pr_warn("Signature data not PKCS#7\n");
        return -ELIBBAD;
 }
 
@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
        digest_size = crypto_shash_digestsize(tfm);
 
        if (digest_size != ctx->digest_len) {
-               pr_debug("Digest size mismatch (%zx != %x)\n",
-                        digest_size, ctx->digest_len);
+               pr_warn("Digest size mismatch (%zx != %x)\n",
+                       digest_size, ctx->digest_len);
                ret = -EBADMSG;
                goto error_no_desc;
        }
@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
         * PKCS#7 certificate.
         */
        if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
-               pr_debug("Digest mismatch\n");
+               pr_warn("Digest mismatch\n");
                ret = -EKEYREJECTED;
        } else {
                pr_debug("The digests match!\n");
index 07aa77aed1c8dd27dd63b0ed658cb923c5cde5f4..f22fd44d586b2eab2ed203819588fc494c22014b 100644 (file)
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-obj-y  += habanalabs/
-obj-y  += ivpu/
+obj-$(CONFIG_DRM_ACCEL_HABANALABS)     += habanalabs/
+obj-$(CONFIG_DRM_ACCEL_IVPU)           += ivpu/
index 10975bb603fb15c099165ad4fa15b88ab3c794ef..a35dd0e41c27043bc0cb6f8783c1bc0280cb1155 100644 (file)
@@ -536,16 +536,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
 static struct acpi_table_header *acpi_get_pptt(void)
 {
        static struct acpi_table_header *pptt;
+       static bool is_pptt_checked;
        acpi_status status;
 
        /*
         * PPTT will be used at runtime on every CPU hotplug in path, so we
         * don't need to call acpi_put_table() to release the table mapping.
         */
-       if (!pptt) {
+       if (!pptt && !is_pptt_checked) {
                status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
                if (ACPI_FAILURE(status))
                        acpi_pptt_warn_missing();
+
+               is_pptt_checked = true;
        }
 
        return pptt;
index 1278969eec1f9928ed75dbbd00aefa1620f08575..4bd16b3f0781481f6cfd54d760f47f3ad54b405a 100644 (file)
@@ -263,6 +263,12 @@ static int __init acpi_processor_driver_init(void)
        if (acpi_disabled)
                return 0;
 
+       if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
+                                      CPUFREQ_POLICY_NOTIFIER)) {
+               acpi_processor_cpufreq_init = true;
+               acpi_processor_ignore_ppc_init();
+       }
+
        result = driver_register(&acpi_processor_driver);
        if (result < 0)
                return result;
@@ -276,12 +282,6 @@ static int __init acpi_processor_driver_init(void)
        cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
                                  NULL, acpi_soft_cpu_dead);
 
-       if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
-                                      CPUFREQ_POLICY_NOTIFIER)) {
-               acpi_processor_cpufreq_init = true;
-               acpi_processor_ignore_ppc_init();
-       }
-
        acpi_processor_throttling_init();
        return 0;
 err:
index e534fd49a67e50877cc9ecd2672720fceed467f5..b7c6287eccca28c17908646a52af083d9e31a5dd 100644 (file)
@@ -140,9 +140,13 @@ void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
                ret = freq_qos_add_request(&policy->constraints,
                                           &pr->thermal_req,
                                           FREQ_QOS_MAX, INT_MAX);
-               if (ret < 0)
+               if (ret < 0) {
                        pr_err("Failed to add freq constraint for CPU%d (%d)\n",
                               cpu, ret);
+                       continue;
+               }
+
+               thermal_cooling_device_update(pr->cdev);
        }
 }
 
@@ -153,8 +157,12 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
        for_each_cpu(cpu, policy->related_cpus) {
                struct acpi_processor *pr = per_cpu(processors, cpu);
 
-               if (pr)
-                       freq_qos_remove_request(&pr->thermal_req);
+               if (!pr)
+                       continue;
+
+               freq_qos_remove_request(&pr->thermal_req);
+
+               thermal_cooling_device_update(pr->cdev);
        }
 }
 #else                          /* ! CONFIG_CPU_FREQ */
index 7c9125df5a651cfaca6da86adc9d6932cd97fdf4..7b4801ce62d6bff936128c4581349e7342669c07 100644 (file)
@@ -400,6 +400,13 @@ static const struct dmi_system_id medion_laptop[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "M17T"),
                },
        },
+       {
+               .ident = "MEDION S17413",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+                       DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
+               },
+       },
        { }
 };
 
index 710ac640267dd301da56d7e504b1f3435fe91b42..fd7cbce8076e2395907d5ac610b19673fb7f920d 100644 (file)
@@ -495,6 +495,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
                },
        },
+       {
+        .callback = video_detect_force_native,
+        /* Acer Aspire 3830TG */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
+               },
+       },
        {
         .callback = video_detect_force_native,
         /* Acer Aspire 4810T */
@@ -716,6 +724,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
                },
        },
+       {
+        .callback = video_detect_force_native,
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
+               },
+       },
 
        /*
         * Desktops which falsely report a backlight and which our heuristics
index e45285d4e62a423532414f2c052a6af963c39230..da5727069d851e1a7f0f799d536759d5ab8a473d 100644 (file)
@@ -251,6 +251,7 @@ bool force_storage_d3(void)
 #define ACPI_QUIRK_UART1_TTY_UART2_SKIP                                BIT(1)
 #define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY                    BIT(2)
 #define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY                     BIT(3)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS                    BIT(4)
 
 static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
        /*
@@ -279,6 +280,16 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
         *    need the x86-android-tablets module to properly work.
         */
 #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+       {
+               /* Acer Iconia One 7 B1-750 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+               },
+               .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+                                       ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+       },
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -286,7 +297,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
                },
                .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                                        ACPI_QUIRK_UART1_TTY_UART2_SKIP |
-                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+                                       ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+       },
+       {
+               /* Lenovo Yoga Book X90F/L */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+               },
+               .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+                                       ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
        },
        {
                .matches = {
@@ -294,7 +317,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
                },
                .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
-                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+                                       ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
        },
        {
                /* Lenovo Yoga Tablet 2 1050F/L */
@@ -336,7 +360,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
                },
                .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
-                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+                                       ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+                                       ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
        },
        {
                /* Whitelabel (sold as various brands) TM800A550L */
@@ -413,6 +438,20 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
        return 0;
 }
 EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+
+bool acpi_quirk_skip_gpio_event_handlers(void)
+{
+       const struct dmi_system_id *dmi_id;
+       long quirks;
+
+       dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+       if (!dmi_id)
+               return false;
+
+       quirks = (unsigned long)dmi_id->driver_data;
+       return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
 #endif
 
 /* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
index 294a266a0dda579c58475678ade746f2e2c723ff..c1576d943b4364b861c85ec609fd51c21dbe46d4 100644 (file)
@@ -381,6 +381,7 @@ static void pata_parport_dev_release(struct device *dev)
 {
        struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
 
+       ida_free(&pata_parport_bus_dev_ids, dev->id);
        kfree(pi);
 }
 
@@ -433,23 +434,27 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
        if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev))
                return NULL;
 
+       id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
+       if (id < 0)
+               return NULL;
+
        pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
-       if (!pi)
+       if (!pi) {
+               ida_free(&pata_parport_bus_dev_ids, id);
                return NULL;
+       }
 
        /* set up pi->dev before pi_probe_unit() so it can use dev_printk() */
        pi->dev.parent = &pata_parport_bus;
        pi->dev.bus = &pata_parport_bus_type;
        pi->dev.driver = &pr->driver;
        pi->dev.release = pata_parport_dev_release;
-       id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
-       if (id < 0)
-               return NULL; /* pata_parport_dev_release will do kfree(pi) */
        pi->dev.id = id;
        dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id);
        if (device_register(&pi->dev)) {
                put_device(&pi->dev);
-               goto out_ida_free;
+               /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
+               return NULL;
        }
 
        pi->proto = pr;
@@ -464,8 +469,7 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
        pi->port = parport->base;
 
        par_cb.private = pi;
-       pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb,
-                                               pi->dev.id);
+       pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id);
        if (!pi->pardev)
                goto out_module_put;
 
@@ -487,12 +491,13 @@ static struct pi_adapter *pi_init_one(struct parport *parport,
 
        pi_connect(pi);
        if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht))
-               goto out_unreg_parport;
+               goto out_disconnect;
 
        return pi;
 
-out_unreg_parport:
+out_disconnect:
        pi_disconnect(pi);
+out_unreg_parport:
        parport_unregister_device(pi->pardev);
        if (pi->proto->release_proto)
                pi->proto->release_proto(pi);
@@ -500,8 +505,7 @@ out_module_put:
        module_put(pi->proto->owner);
 out_unreg_dev:
        device_unregister(&pi->dev);
-out_ida_free:
-       ida_free(&pata_parport_bus_dev_ids, pi->dev.id);
+       /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
        return NULL;
 }
 
@@ -626,8 +630,7 @@ static void pi_remove_one(struct device *dev)
        pi_disconnect(pi);
        pi_release(pi);
        device_unregister(dev);
-       ida_free(&pata_parport_bus_dev_ids, dev->id);
-       /* pata_parport_dev_release will do kfree(pi) */
+       /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */
 }
 
 static ssize_t delete_device_store(struct bus_type *bus, const char *buf,
@@ -643,6 +646,7 @@ static ssize_t delete_device_store(struct bus_type *bus, const char *buf,
        }
 
        pi_remove_one(dev);
+       put_device(dev);
        mutex_unlock(&pi_mutex);
 
        return count;
index eec0cc2144e0227155df540cc2e0e75208e5d37b..e327a0229dc173442b2789a402a8ea0adb931cdd 100644 (file)
@@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card)
 
                                recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
                        }
+                       kfree(vc);
                }
        }
 }
@@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card)
        return 0;
 }
 
+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+       struct vc_map *vc = card->vcs[0];
+
+       free_scq(card, vc->scq);
+       kfree(vc);
+}
+
 static int
 idt77252_dev_open(struct idt77252_dev *card)
 {
@@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
        struct idt77252_dev *card = dev->dev_data;
        u32 conf;
 
+       close_card_ubr0(card);
        close_card_oam(card);
 
        conf = SAR_CFG_RXPTH |  /* enable receive path           */
index 839373451c2b7dc8d2db845decfeca198fce0452..28eb59fd71ca23b19d59e14958358dd9a9a26cbb 100644 (file)
@@ -1859,35 +1859,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
 {
+       struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
+       struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
        struct request *rq = blk_mq_rq_from_pdu(cmd);
        const bool write = op_is_write(req_op(rq));
        struct loop_device *lo = rq->q->queuedata;
        int ret = 0;
        struct mem_cgroup *old_memcg = NULL;
+       const bool use_aio = cmd->use_aio;
 
        if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
                ret = -EIO;
                goto failed;
        }
 
-       if (cmd->blkcg_css)
-               kthread_associate_blkcg(cmd->blkcg_css);
-       if (cmd->memcg_css)
+       if (cmd_blkcg_css)
+               kthread_associate_blkcg(cmd_blkcg_css);
+       if (cmd_memcg_css)
                old_memcg = set_active_memcg(
-                       mem_cgroup_from_css(cmd->memcg_css));
+                       mem_cgroup_from_css(cmd_memcg_css));
 
+       /*
+        * do_req_filebacked() may call blk_mq_complete_request() synchronously
+        * or asynchronously if using aio. Hence, do not touch 'cmd' after
+        * do_req_filebacked() has returned unless we are sure that 'cmd' has
+        * not yet been completed.
+        */
        ret = do_req_filebacked(lo, rq);
 
-       if (cmd->blkcg_css)
+       if (cmd_blkcg_css)
                kthread_associate_blkcg(NULL);
 
-       if (cmd->memcg_css) {
+       if (cmd_memcg_css) {
                set_active_memcg(old_memcg);
-               css_put(cmd->memcg_css);
+               css_put(cmd_memcg_css);
        }
  failed:
        /* complete non-aio request */
-       if (!cmd->use_aio || ret) {
+       if (!use_aio || ret) {
                if (ret == -EOPNOTSUPP)
                        cmd->ret = ret;
                else
index 4c601ca9552a07dd4ae9828c6703d1dc3d694b4c..9e6b032c8ecc2c93a2dffb32dcdcb0e6100f575b 100644 (file)
@@ -1413,8 +1413,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (cmd->nq->dev->queue_mode) {
                case NULL_Q_MQ:
-                       if (likely(!blk_should_fake_timeout(cmd->rq->q)))
-                               blk_mq_complete_request(cmd->rq);
+                       blk_mq_complete_request(cmd->rq);
                        break;
                case NULL_Q_BIO:
                        /*
@@ -1658,12 +1657,13 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
 }
 
 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
-                        const struct blk_mq_queue_data *bd)
+                                 const struct blk_mq_queue_data *bd)
 {
-       struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+       struct request *rq = bd->rq;
+       struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
        struct nullb_queue *nq = hctx->driver_data;
-       sector_t nr_sectors = blk_rq_sectors(bd->rq);
-       sector_t sector = blk_rq_pos(bd->rq);
+       sector_t nr_sectors = blk_rq_sectors(rq);
+       sector_t sector = blk_rq_pos(rq);
        const bool is_poll = hctx->type == HCTX_TYPE_POLL;
 
        might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
@@ -1672,14 +1672,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                cmd->timer.function = null_cmd_timer_expired;
        }
-       cmd->rq = bd->rq;
+       cmd->rq = rq;
        cmd->error = BLK_STS_OK;
        cmd->nq = nq;
-       cmd->fake_timeout = should_timeout_request(bd->rq);
+       cmd->fake_timeout = should_timeout_request(rq) ||
+               blk_should_fake_timeout(rq->q);
 
-       blk_mq_start_request(bd->rq);
+       blk_mq_start_request(rq);
 
-       if (should_requeue_request(bd->rq)) {
+       if (should_requeue_request(rq)) {
                /*
                 * Alternate between hitting the core BUSY path, and the
                 * driver driven requeue path
@@ -1687,22 +1688,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                nq->requeue_selection++;
                if (nq->requeue_selection & 1)
                        return BLK_STS_RESOURCE;
-               else {
-                       blk_mq_requeue_request(bd->rq, true);
-                       return BLK_STS_OK;
-               }
+               blk_mq_requeue_request(rq, true);
+               return BLK_STS_OK;
        }
 
        if (is_poll) {
                spin_lock(&nq->poll_lock);
-               list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+               list_add_tail(&rq->queuelist, &nq->poll_list);
                spin_unlock(&nq->poll_lock);
                return BLK_STS_OK;
        }
        if (cmd->fake_timeout)
                return BLK_STS_OK;
 
-       return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
+       return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
 }
 
 static void cleanup_queue(struct nullb_queue *nq)
index fb855da971ee7b388efd1f385fe6c929bda97d4f..9fa821fa76b07b47d673f7d57c591fc8e5ddd4a9 100644 (file)
@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        print_version();
 
        hp = mdesc_grab();
+       if (!hp)
+               return -ENODEV;
 
        err = -ENODEV;
        if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
index d1d1c8d606c8d8e9c79f094ffc626e2909df7b14..c73cc57ec5477fa727e038377277d66f0d4796d3 100644 (file)
@@ -715,7 +715,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
        }
 }
 
-static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+static void ubq_complete_io_cmd(struct ublk_io *io, int res,
+                               unsigned issue_flags)
 {
        /* mark this cmd owned by ublksrv */
        io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -727,7 +728,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
        io->flags &= ~UBLK_IO_FLAG_ACTIVE;
 
        /* tell ublksrv one io request is coming */
-       io_uring_cmd_done(io->cmd, res, 0);
+       io_uring_cmd_done(io->cmd, res, 0, issue_flags);
 }
 
 #define UBLK_REQUEUE_DELAY_MS  3
@@ -744,7 +745,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
        mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
 }
 
-static inline void __ublk_rq_task_work(struct request *req)
+static inline void __ublk_rq_task_work(struct request *req,
+                                      unsigned issue_flags)
 {
        struct ublk_queue *ubq = req->mq_hctx->driver_data;
        int tag = req->tag;
@@ -782,7 +784,7 @@ static inline void __ublk_rq_task_work(struct request *req)
                        pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
                                        __func__, io->cmd->cmd_op, ubq->q_id,
                                        req->tag, io->flags);
-                       ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+                       ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
                        return;
                }
                /*
@@ -820,17 +822,18 @@ static inline void __ublk_rq_task_work(struct request *req)
                        mapped_bytes >> 9;
        }
 
-       ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
+       ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
 }
 
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
+static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
+                                       unsigned issue_flags)
 {
        struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
        struct ublk_rq_data *data, *tmp;
 
        io_cmds = llist_reverse_order(io_cmds);
        llist_for_each_entry_safe(data, tmp, io_cmds, node)
-               __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+               __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
 }
 
 static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
@@ -842,12 +845,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
                __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
 }
 
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
 {
        struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
        struct ublk_queue *ubq = pdu->ubq;
 
-       ublk_forward_io_cmds(ubq);
+       ublk_forward_io_cmds(ubq, issue_flags);
 }
 
 static void ublk_rq_task_work_fn(struct callback_head *work)
@@ -856,8 +859,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
                        struct ublk_rq_data, work);
        struct request *req = blk_mq_rq_from_pdu(data);
        struct ublk_queue *ubq = req->mq_hctx->driver_data;
+       unsigned issue_flags = IO_URING_F_UNLOCKED;
 
-       ublk_forward_io_cmds(ubq);
+       ublk_forward_io_cmds(ubq, issue_flags);
 }
 
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@@ -1111,7 +1115,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
                struct ublk_io *io = &ubq->ios[i];
 
                if (io->flags & UBLK_IO_FLAG_ACTIVE)
-                       io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
+                       io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
+                                               IO_URING_F_UNLOCKED);
        }
 
        /* all io commands are canceled */
@@ -1351,7 +1356,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
        return -EIOCBQUEUED;
 
  out:
-       io_uring_cmd_done(cmd, ret, 0);
+       io_uring_cmd_done(cmd, ret, 0, issue_flags);
        pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
                        __func__, cmd_op, tag, ret, io->flags);
        return -EIOCBQUEUED;
@@ -1602,17 +1607,18 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
                set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
 
        get_device(&ub->cdev_dev);
+       ub->dev_info.state = UBLK_S_DEV_LIVE;
        ret = add_disk(disk);
        if (ret) {
                /*
                 * Has to drop the reference since ->free_disk won't be
                 * called in case of add_disk failure.
                 */
+               ub->dev_info.state = UBLK_S_DEV_DEAD;
                ublk_put_device(ub);
                goto out_put_disk;
        }
        set_bit(UB_STATE_USED, &ub->state);
-       ub->dev_info.state = UBLK_S_DEV_LIVE;
 out_put_disk:
        if (ret)
                put_disk(disk);
@@ -2233,7 +2239,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
        if (ub)
                ublk_put_device(ub);
  out:
-       io_uring_cmd_done(cmd, ret, 0);
+       io_uring_cmd_done(cmd, ret, 0, issue_flags);
        pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
                        __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
        return -EIOCBQUEUED;
index bede8b0055940b69ad75c5f02aa6722d26627836..af774688f1c0dacf18e6cce1749c558bfcd6bd3f 100644 (file)
 #define ECDSA_HEADER_LEN       320
 
 #define BTINTEL_PPAG_NAME   "PPAG"
-#define BTINTEL_PPAG_PREFIX "\\_SB_.PCI0.XHCI.RHUB"
+
+/* structure to store the PPAG data read from ACPI table */
+struct btintel_ppag {
+       u32     domain;
+       u32     mode;
+       acpi_status status;
+       struct hci_dev *hdev;
+};
 
 #define CMD_WRITE_BOOT_PARAMS  0xfc0e
 struct cmd_write_boot_params {
@@ -1295,17 +1302,16 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
 
        status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
        if (ACPI_FAILURE(status)) {
-               bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+               bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
                return status;
        }
 
-       if (strncmp(BTINTEL_PPAG_PREFIX, string.pointer,
-                   strlen(BTINTEL_PPAG_PREFIX))) {
+       len = strlen(string.pointer);
+       if (len < strlen(BTINTEL_PPAG_NAME)) {
                kfree(string.pointer);
                return AE_OK;
        }
 
-       len = strlen(string.pointer);
        if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
                kfree(string.pointer);
                return AE_OK;
@@ -1314,7 +1320,8 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
 
        status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
        if (ACPI_FAILURE(status)) {
-               bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+               ppag->status = status;
+               bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
                return status;
        }
 
@@ -1323,8 +1330,9 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
 
        if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
                kfree(buffer.pointer);
-               bt_dev_warn(hdev, "Invalid object type: %d or package count: %d",
+               bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
                            p->type, p->package.count);
+               ppag->status = AE_ERROR;
                return AE_ERROR;
        }
 
@@ -1335,6 +1343,7 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data
 
        ppag->domain = (u32)p->package.elements[0].integer.value;
        ppag->mode = (u32)p->package.elements[1].integer.value;
+       ppag->status = AE_OK;
        kfree(buffer.pointer);
        return AE_CTRL_TERMINATE;
 }
@@ -2314,12 +2323,12 @@ error:
 
 static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
 {
-       acpi_status status;
        struct btintel_ppag ppag;
        struct sk_buff *skb;
        struct btintel_loc_aware_reg ppag_cmd;
+       acpi_handle handle;
 
-    /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
+       /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
        switch (ver->cnvr_top & 0xFFF) {
        case 0x504:     /* Hrp2 */
        case 0x202:     /* Jfp2 */
@@ -2327,29 +2336,35 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
                return;
        }
 
+       handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+       if (!handle) {
+               bt_dev_info(hdev, "No support for BT device in ACPI firmware");
+               return;
+       }
+
        memset(&ppag, 0, sizeof(ppag));
 
        ppag.hdev = hdev;
-       status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
-                                    ACPI_UINT32_MAX, NULL,
-                                    btintel_ppag_callback, &ppag, NULL);
+       ppag.status = AE_NOT_FOUND;
+       acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
+                           btintel_ppag_callback, &ppag, NULL);
 
-       if (ACPI_FAILURE(status)) {
-               /* Do not log warning message if ACPI entry is not found */
-               if (status == AE_NOT_FOUND)
+       if (ACPI_FAILURE(ppag.status)) {
+               if (ppag.status == AE_NOT_FOUND) {
+                       bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
                        return;
-               bt_dev_warn(hdev, "PPAG: ACPI Failure: %s", acpi_format_exception(status));
+               }
                return;
        }
 
        if (ppag.domain != 0x12) {
-               bt_dev_warn(hdev, "PPAG-BT Domain disabled");
+               bt_dev_warn(hdev, "PPAG-BT: domain is not bluetooth");
                return;
        }
 
        /* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */
        if (!(ppag.mode & BIT(0))) {
-               bt_dev_dbg(hdev, "PPAG disabled");
+               bt_dev_dbg(hdev, "PPAG-BT: disabled");
                return;
        }
 
index 8e7da877efae6a8f114c0b1e95c654a96889a129..8fdb65b66315a7c93d5794a14dfd6e3a894de4fc 100644 (file)
@@ -137,13 +137,6 @@ struct intel_offload_use_cases {
        __u8    preset[8];
 } __packed;
 
-/* structure to store the PPAG data read from ACPI table */
-struct btintel_ppag {
-       u32     domain;
-       u32     mode;
-       struct hci_dev *hdev;
-};
-
 struct btintel_loc_aware_reg {
        __le32 mcc;
        __le32 sel;
index 2acb719e596f59e930c520b5030f0c6d0e5a55cb..11c7e04bf3947e37039e32cbe234b9cabec2e6e6 100644 (file)
@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
        return 0;
 }
 
+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       int ret;
+
+       ret = qca_set_bdaddr_rome(hdev, bdaddr);
+       if (ret)
+               return ret;
+
+       /* The firmware stops responding for a while after setting the bdaddr,
+        * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+        */
+       usleep_range(1000, 10000);
+       return 0;
+}
+
 static int btqcomsmd_probe(struct platform_device *pdev)
 {
        struct btqcomsmd *btq;
@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
        hdev->close = btqcomsmd_close;
        hdev->send = btqcomsmd_send;
        hdev->setup = btqcomsmd_setup;
-       hdev->set_bdaddr = qca_set_bdaddr_rome;
+       hdev->set_bdaddr = btqcomsmd_set_bdaddr;
 
        ret = hci_register_dev(hdev);
        if (ret < 0)
index 795be33f2892d5ea41afbcb461c4e0c8e939ef2a..02893600db390402858ebf65618832f79cadc375 100644 (file)
@@ -354,6 +354,7 @@ static void btsdio_remove(struct sdio_func *func)
 
        BT_DBG("func %p", func);
 
+       cancel_work_sync(&data->work);
        if (!data)
                return;
 
index 18bc94718711592e2d559077afe010cc50802c4b..5c536151ef8367362517bd8a50a5b47a2b7ce1f9 100644 (file)
@@ -1050,21 +1050,11 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
                hci_skb_expect(skb) -= len;
 
                if (skb->len == HCI_ACL_HDR_SIZE) {
-                       __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
                        __le16 dlen = hci_acl_hdr(skb)->dlen;
-                       __u8 type;
 
                        /* Complete ACL header */
                        hci_skb_expect(skb) = __le16_to_cpu(dlen);
 
-                       /* Detect if ISO packet has been sent over bulk */
-                       if (hci_conn_num(data->hdev, ISO_LINK)) {
-                               type = hci_conn_lookup_type(data->hdev,
-                                                           hci_handle(handle));
-                               if (type == ISO_LINK)
-                                       hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
-                       }
-
                        if (skb_tailroom(skb) < hci_skb_expect(skb)) {
                                kfree_skb(skb);
                                skb = NULL;
index 2a6b4f676458612e00b0551fad9aa51ea57ff6b7..36d42484142aede2a409279f3626f3ffc3e959e9 100644 (file)
@@ -204,8 +204,8 @@ static int weim_parse_dt(struct platform_device *pdev)
        const struct of_device_id *of_id = of_match_device(weim_id_table,
                                                           &pdev->dev);
        const struct imx_weim_devtype *devtype = of_id->data;
+       int ret = 0, have_child = 0;
        struct device_node *child;
-       int ret, have_child = 0;
        struct weim_priv *priv;
        void __iomem *base;
        u32 reg;
index b6c5bf69a2b2c65aa24c2dfcfb6050c1412708d8..1eef05bb1f995eea850c3a18697a6d2f11106215 100644 (file)
@@ -91,7 +91,7 @@ config COMMON_CLK_RK808
 config COMMON_CLK_HI655X
        tristate "Clock driver for Hi655x" if EXPERT
        depends on (MFD_HI655X_PMIC || COMPILE_TEST)
-       depends on REGMAP
+       select REGMAP
        default MFD_HI655X_PMIC
        help
          This driver supports the hi655x PMIC clock. This
index 290a2846a86b654034293308f33602114c85dae1..0fafa5cba4427d219d6337d20b15e26ff09fd32c 100644 (file)
@@ -69,4 +69,3 @@ builtin_platform_driver(bcm2835_aux_clk_driver);
 
 MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
 MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver");
-MODULE_LICENSE("GPL");
index e74fe6219d14e2e0c39b65a5e9b2a9cef7c42d9a..8dc476ef5bf975e8251c693304d7f91a1fe57a57 100644 (file)
@@ -2350,4 +2350,3 @@ builtin_platform_driver(bcm2835_clk_driver);
 
 MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
 MODULE_DESCRIPTION("BCM2835 clock driver");
-MODULE_LICENSE("GPL");
index 5225d17d6b3f39210dc19de7ffb861f4544e124a..8609fca29cc4e08ddd49066392acf563bc95891e 100644 (file)
@@ -99,4 +99,3 @@ module_platform_driver(of_fixed_mmio_clk_driver);
 
 MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
 MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver");
-MODULE_LICENSE("GPL v2");
index 6238fcea04673052e60e6a2293f021bd44d34d40..ee5baf993ff21d3b9c6bda31db1a5b6c83a38f49 100644 (file)
@@ -88,5 +88,4 @@ module_platform_driver(fsl_sai_clk_driver);
 
 MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver");
 MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
-MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:fsl-sai-clk");
index 67a7cb3503c3697b169e436f5aeb361de4e2febe..4eed667eddaf25b4e3bd974ade93a0243109bf6a 100644 (file)
@@ -495,7 +495,7 @@ static unsigned long k210_pll_get_rate(struct clk_hw *hw,
        f = FIELD_GET(K210_PLL_CLKF, reg) + 1;
        od = FIELD_GET(K210_PLL_CLKOD, reg) + 1;
 
-       return (u64)parent_rate * f / (r * od);
+       return div_u64((u64)parent_rate * f, r * od);
 }
 
 static const struct clk_ops k210_pll_ops = {
index 9ea1a80acbe8b5be6475d2f729e5609c0a61d3a9..8036bd8cbb0ac21fbe5688c71809effd85b68224 100644 (file)
@@ -841,5 +841,4 @@ static void __exit hi3559av100_crg_exit(void)
 module_exit(hi3559av100_crg_exit);
 
 
-MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("HiSilicon Hi3559AV100 CRG Driver");
index 0ddc73e07be429734d32aae75f08534782b3c564..bce61c45e96748d5a7f3e67ec82a5fecdd6f74a3 100644 (file)
@@ -291,4 +291,3 @@ module_exit(clk_ccc_exit);
 
 MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver");
 MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_LICENSE("GPL");
index 6ad2954948a5abb2f9612d4e52832d73ef25020d..11316c3b14ca49806839059a2b754658a4710826 100644 (file)
@@ -106,7 +106,8 @@ static void psci_pd_remove(void)
        struct psci_pd_provider *pd_provider, *it;
        struct generic_pm_domain *genpd;
 
-       list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
+       list_for_each_entry_safe_reverse(pd_provider, it,
+                                        &psci_pd_providers, link) {
                of_genpd_del_provider(pd_provider->node);
 
                genpd = of_genpd_remove_last(pd_provider->node);
index 73140b854b313f53736c690162a05595a036e6b8..c15928b8c5cc9976b9f12ca5a7e4154dcbb0e888 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/device.h>
-#include <linux/of.h>
 
 #include "common.h"
 
@@ -436,7 +435,7 @@ struct scmi_device *scmi_device_create(struct device_node *np,
        /* Nothing to do. */
        if (!phead) {
                mutex_unlock(&scmi_requested_devices_mtx);
-               return scmi_dev;
+               return NULL;
        }
 
        /* Walk the list of requested devices for protocol and create them */
index d21c7eafd641c228ce7e156ee516f5b645376c9b..dbc474ff62b71362a510cd02ecba2c50d72f7b53 100644 (file)
@@ -2221,8 +2221,8 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
        hash_init(info->pending_xfers);
 
        /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
-       info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
-                                             sizeof(long), GFP_KERNEL);
+       info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
+                                                   GFP_KERNEL);
        if (!info->xfer_alloc_table)
                return -ENOMEM;
 
@@ -2657,6 +2657,7 @@ static int scmi_probe(struct platform_device *pdev)
        struct scmi_handle *handle;
        const struct scmi_desc *desc;
        struct scmi_info *info;
+       bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
        struct device *dev = &pdev->dev;
        struct device_node *child, *np = dev->of_node;
 
@@ -2731,16 +2732,13 @@ static int scmi_probe(struct platform_device *pdev)
                        dev_warn(dev, "Failed to setup SCMI debugfs.\n");
 
                if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
-                       bool coex =
-                             IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
-
                        ret = scmi_debugfs_raw_mode_setup(info);
                        if (!coex) {
                                if (ret)
                                        goto clear_dev_req_notifier;
 
-                               /* Bail out anyway when coex enabled */
-                               return ret;
+                               /* Bail out anyway when coex disabled. */
+                               return 0;
                        }
 
                        /* Coex enabled, carry on in any case. */
@@ -2764,6 +2762,8 @@ static int scmi_probe(struct platform_device *pdev)
        ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
        if (ret) {
                dev_err(dev, "unable to communicate with SCMI\n");
+               if (coex)
+                       return 0;
                goto notification_exit;
        }
 
index 0d9c9538b7f4175e0139563649ff2c687721f72e..112c285deb97b768fa3c23f6d738d8c0d043025b 100644 (file)
@@ -52,6 +52,39 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx)
                                           "#mbox-cells", idx, NULL);
 }
 
+static int mailbox_chan_validate(struct device *cdev)
+{
+       int num_mb, num_sh, ret = 0;
+       struct device_node *np = cdev->of_node;
+
+       num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+       num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+       /* Bail out if mboxes and shmem descriptors are inconsistent */
+       if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+               dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+                        of_node_full_name(np));
+               return -EINVAL;
+       }
+
+       if (num_sh > 1) {
+               struct device_node *np_tx, *np_rx;
+
+               np_tx = of_parse_phandle(np, "shmem", 0);
+               np_rx = of_parse_phandle(np, "shmem", 1);
+               /* SCMI Tx and Rx shared mem areas have to be distinct */
+               if (!np_tx || !np_rx || np_tx == np_rx) {
+                       dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+                                of_node_full_name(np));
+                       ret = -EINVAL;
+               }
+
+               of_node_put(np_tx);
+               of_node_put(np_rx);
+       }
+
+       return ret;
+}
+
 static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                              bool tx)
 {
@@ -64,6 +97,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
        resource_size_t size;
        struct resource res;
 
+       ret = mailbox_chan_validate(cdev);
+       if (ret)
+               return ret;
+
        smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
        if (!smbox)
                return -ENOMEM;
index f54e6fdf08e2bf4c62c4b93f7550b6f47074d361..f80a9af3d16e94de51e4f124ec4ff40a43eaa31a 100644 (file)
@@ -215,6 +215,14 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
        }
 }
 
+static bool __initdata fb_probed;
+
+void __init efi_earlycon_reprobe(void)
+{
+       if (fb_probed)
+               setup_earlycon("efifb");
+}
+
 static int __init efi_earlycon_setup(struct earlycon_device *device,
                                     const char *opt)
 {
@@ -222,15 +230,17 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
        u16 xres, yres;
        u32 i;
 
-       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+       fb_wb = opt && !strcmp(opt, "ram");
+
+       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) {
+               fb_probed = true;
                return -ENODEV;
+       }
 
        fb_base = screen_info.lfb_base;
        if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
                fb_base |= (u64)screen_info.ext_lfb_base << 32;
 
-       fb_wb = opt && !strcmp(opt, "ram");
-
        si = &screen_info;
        xres = si->lfb_width;
        yres = si->lfb_height;
index 2c16080e1f71907c6f7b03434c4cd2ac47a6ea62..ef0820f1a9246ede74aa4a7aec41938628d2c755 100644 (file)
@@ -72,6 +72,9 @@ static void __init init_screen_info(void)
                if (memblock_is_map_memory(screen_info.lfb_base))
                        memblock_mark_nomap(screen_info.lfb_base,
                                            screen_info.lfb_size);
+
+               if (IS_ENABLED(CONFIG_EFI_EARLYCON))
+                       efi_earlycon_reprobe();
        }
 }
 
index 43e9a4cab9f5dcf169d158929bb3d9711817621b..ccdd6a130d98618ec52098bcc1cfc7f4f588c400 100644 (file)
@@ -44,4 +44,4 @@ OBJCOPYFLAGS_vmlinuz.efi := -O binary
 $(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE
        $(call if_changed,objcopy)
 
-targets += zboot-header.o vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
+targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
index d4a6b12a87413024a9c1df0b7a1744e8d0cd8f1a..770b8ecb73984c6115a5c37e2105637d4ec9bfd6 100644 (file)
@@ -85,8 +85,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
                }
        }
 
-       if (image->image_base != _text)
+       if (image->image_base != _text) {
                efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
+               image->image_base = _text;
+       }
 
        if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
                efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
@@ -139,6 +141,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
        *image_addr = *reserve_addr;
        memcpy((void *)*image_addr, _text, kernel_size);
        caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
+       efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
 
        return EFI_SUCCESS;
 }
index 3997702663727e30948d9083bad6e70e1498af02..8aad8c49d43f18e0903e055ecfbf38740c957fa0 100644 (file)
 
 static bool system_needs_vamap(void)
 {
-       const u8 *type1_family = efi_get_smbios_string(1, family);
+       const struct efi_smbios_type4_record *record;
+       const u32 __aligned(1) *socid;
+       const u8 *version;
 
        /*
         * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
-        * SetVirtualAddressMap() has not been called prior.
+        * SetVirtualAddressMap() has not been called prior. Most Altra systems
+        * can be identified by the SMCCC soc ID, which is conveniently exposed
+        * via the type 4 SMBIOS records. Otherwise, test the processor version
+        * field. eMAG systems all appear to have the processor version field
+        * set to "eMAG".
         */
-       if (!type1_family || (
-           strcmp(type1_family, "eMAG") &&
-           strcmp(type1_family, "Altra") &&
-           strcmp(type1_family, "Altra Max")))
+       record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4);
+       if (!record)
                return false;
 
-       efi_warn("Working around broken SetVirtualAddressMap()\n");
-       return true;
+       socid = (u32 *)record->processor_id;
+       switch (*socid & 0xffff000f) {
+               static char const altra[] = "Ampere(TM) Altra(TM) Processor";
+               static char const emag[] = "eMAG";
+
+       default:
+               version = efi_get_smbios_string(&record->header, 4,
+                                               processor_version);
+               if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
+                                strncmp(version, emag, sizeof(emag) - 1)))
+                       break;
+
+               fallthrough;
+
+       case 0x0a160001:        // Altra
+       case 0x0a160002:        // Altra Max
+               efi_warn("Working around broken SetVirtualAddressMap()\n");
+               return true;
+       }
+
+       return false;
 }
 
 efi_status_t check_platform_features(void)
index 5245c4f031c0a70a5a8aa1146a7da3b5abb7d933..cc4dcaea67fa67f4ae5ba312a6c3cdb575663a45 100644 (file)
@@ -5,6 +5,15 @@
 
 #include "efistub.h"
 
+static unsigned long screen_info_offset;
+
+struct screen_info *alloc_screen_info(void)
+{
+       if (IS_ENABLED(CONFIG_ARM))
+               return __alloc_screen_info();
+       return (void *)&screen_info + screen_info_offset;
+}
+
 /*
  * EFI entry point for the generic EFI stub used by ARM, arm64, RISC-V and
  * LoongArch. This is the entrypoint that is described in the PE/COFF header
@@ -56,6 +65,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
                return status;
        }
 
+       screen_info_offset = image_addr - (unsigned long)image->image_base;
+
        status = efi_stub_common(handle, image, image_addr, cmdline_ptr);
 
        efi_free(image_size, image_addr);
index 2955c1ac6a36ee00cff656f63eb79c57a98fb10d..f9c1e8a2bd1d3e49b5e98bfa6d419fe9b74b075d 100644 (file)
 static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
 static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
 
-struct screen_info * __weak alloc_screen_info(void)
-{
-       return &screen_info;
-}
-
 void __weak free_screen_info(struct screen_info *si)
 {
 }
index 6bd3bb86d9679a1c26f6fa7416536f91e8762ba0..148013bcb5f89fdd90de221f4d88da83f6e75337 100644 (file)
@@ -1062,6 +1062,7 @@ efi_enable_reset_attack_mitigation(void) { }
 void efi_retrieve_tpm2_eventlog(void);
 
 struct screen_info *alloc_screen_info(void);
+struct screen_info *__alloc_screen_info(void);
 void free_screen_info(struct screen_info *si);
 
 void efi_cache_sync_image(unsigned long image_base,
@@ -1074,6 +1075,8 @@ struct efi_smbios_record {
        u16     handle;
 };
 
+const struct efi_smbios_record *efi_get_smbios_record(u8 type);
+
 struct efi_smbios_type1_record {
        struct efi_smbios_record        header;
 
@@ -1087,14 +1090,46 @@ struct efi_smbios_type1_record {
        u8                              family;
 };
 
-#define efi_get_smbios_string(__type, __name) ({                       \
-       int size = sizeof(struct efi_smbios_type ## __type ## _record); \
+struct efi_smbios_type4_record {
+       struct efi_smbios_record        header;
+
+       u8                              socket;
+       u8                              processor_type;
+       u8                              processor_family;
+       u8                              processor_manufacturer;
+       u8                              processor_id[8];
+       u8                              processor_version;
+       u8                              voltage;
+       u16                             external_clock;
+       u16                             max_speed;
+       u16                             current_speed;
+       u8                              status;
+       u8                              processor_upgrade;
+       u16                             l1_cache_handle;
+       u16                             l2_cache_handle;
+       u16                             l3_cache_handle;
+       u8                              serial_number;
+       u8                              asset_tag;
+       u8                              part_number;
+       u8                              core_count;
+       u8                              enabled_core_count;
+       u8                              thread_count;
+       u16                             processor_characteristics;
+       u16                             processor_family2;
+       u16                             core_count2;
+       u16                             enabled_core_count2;
+       u16                             thread_count2;
+       u16                             thread_enabled;
+};
+
+#define efi_get_smbios_string(__record, __type, __name) ({             \
        int off = offsetof(struct efi_smbios_type ## __type ## _record, \
                           __name);                                     \
-       __efi_get_smbios_string(__type, off, size);                     \
+       __efi_get_smbios_string((__record), __type, off);               \
 })
 
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+                                 u8 type, int offset);
 
 void efi_remap_image(unsigned long image_base, unsigned alloc_size,
                     unsigned long code_size);
index 1692d19ae80f0065627650344aa6dbae92180928..32c7a54923b4c1273feee1db101accd48083256c 100644 (file)
@@ -101,6 +101,7 @@ efi_status_t efi_random_alloc(unsigned long size,
         * to calculate the randomly chosen address, and allocate it directly
         * using EFI_ALLOCATE_ADDRESS.
         */
+       status = EFI_OUT_OF_RESOURCES;
        for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
                efi_memory_desc_t *md = (void *)map->map + map_offset;
                efi_physical_addr_t target;
index 8e76a8b384ba142d6967b59e9dc310b4828f60f7..4be1c4d1f922becd08ddd13d542f4d2fbd6e1883 100644 (file)
  * early, but it only works if the EFI stub is part of the core kernel image
  * itself. The zboot decompressor can only use the configuration table
  * approach.
- *
- * In order to support both methods from the same build of the EFI stub
- * library, provide this dummy global definition of struct screen_info. If it
- * is required to satisfy a link dependency, it means we need to override the
- * __weak alloc and free methods with the ones below, and those will be pulled
- * in as well.
  */
-struct screen_info screen_info;
 
 static efi_guid_t screen_info_guid = LINUX_EFI_SCREEN_INFO_TABLE_GUID;
 
-struct screen_info *alloc_screen_info(void)
+struct screen_info *__alloc_screen_info(void)
 {
        struct screen_info *si;
        efi_status_t status;
index 460418b7f5f5e9ab1d721ec096eaa77bb2462c58..c217de2cc8d56dc2796679f207772ff1288ba3d4 100644 (file)
@@ -22,21 +22,30 @@ struct efi_smbios_protocol {
        u8 minor_version;
 };
 
-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
+const struct efi_smbios_record *efi_get_smbios_record(u8 type)
 {
        struct efi_smbios_record *record;
        efi_smbios_protocol_t *smbios;
        efi_status_t status;
        u16 handle = 0xfffe;
-       const u8 *strtable;
 
        status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
                             (void **)&smbios) ?:
                 efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
        if (status != EFI_SUCCESS)
                return NULL;
+       return record;
+}
+
+const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
+                                 u8 type, int offset)
+{
+       const u8 *strtable;
+
+       if (!record)
+               return NULL;
 
-       strtable = (u8 *)record + recsize;
+       strtable = (u8 *)record + record->length;
        for (int i = 1; i < ((u8 *)record)[offset]; i++) {
                int len = strlen(strtable);
 
index ec4525d40e0cf6d635161da889f14862d7a5610d..445cb646eaaaf1c657f572e0f2d42c63962bfe27 100644 (file)
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
        .long           .Lefi_header_end - .Ldoshdr
        .long           0
        .short          IMAGE_SUBSYSTEM_EFI_APPLICATION
-       .short          0
+       .short          IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
 #ifdef CONFIG_64BIT
        .quad           0, 0, 0, 0
 #else
index ba234e062a1a29dae5618fa9c85bb65ff96fbee5..6105e5e2eda4612b3aa59e9c3e10e25f81075d7d 100644 (file)
@@ -57,6 +57,11 @@ void __weak efi_cache_sync_image(unsigned long image_base,
        // executable code loaded into memory to be safe for execution.
 }
 
+struct screen_info *alloc_screen_info(void)
+{
+       return __alloc_screen_info();
+}
+
 asmlinkage efi_status_t __efiapi
 efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
 {
index f06fdacc9bc830c8f89916a342f795fc33d988cb..456d0e5eaf78b595a66c622103b719e0fd2d3a69 100644 (file)
@@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
                                        "IdeaPad Duet 3 10IGL5"),
                },
        },
+       {
+               /* Lenovo Yoga Book X91F / X91L */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       /* Non exact match to match F + L versions */
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+               },
+       },
        {},
 };
 
@@ -341,7 +349,7 @@ static const struct fwnode_operations efifb_fwnode_ops = {
 #ifdef CONFIG_EFI
 static struct fwnode_handle efifb_fwnode;
 
-__init void sysfb_apply_efi_quirks(struct platform_device *pd)
+__init void sysfb_apply_efi_quirks(void)
 {
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
            !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
@@ -355,7 +363,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd)
                screen_info.lfb_height = temp;
                screen_info.lfb_linelength = 4 * screen_info.lfb_width;
        }
+}
 
+__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
+{
        if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
                fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
                pd->dev.fwnode = &efifb_fwnode;
index 468d4d5ab550c91cfa415418cc5333746dc596c8..b1e11f85b805462fb96def68632ae11fe3a7fd88 100644 (file)
@@ -1479,7 +1479,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
 
        init_completion(&__scm->waitq_comp);
 
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        if (irq < 0) {
                if (irq != -ENXIO)
                        return irq;
index 3fd3563d962b87d73ff2fc2bb3079461cd9f22e0..3c197db42c9d936866f9ff68cf7561e4735cfe1e 100644 (file)
@@ -81,6 +81,8 @@ static __init int sysfb_init(void)
        if (disabled)
                goto unlock_mutex;
 
+       sysfb_apply_efi_quirks();
+
        /* try to create a simple-framebuffer device */
        compatible = sysfb_parse_mode(si, &mode);
        if (compatible) {
@@ -107,7 +109,7 @@ static __init int sysfb_init(void)
                goto unlock_mutex;
        }
 
-       sysfb_apply_efi_quirks(pd);
+       sysfb_set_efifb_fwnode(pd);
 
        ret = platform_device_add_data(pd, si, sizeof(*si));
        if (ret)
index ce9c007ed66ff76d64bf3fd4c023198152a32459..82c64cb9f5316c230fcb25b34936f96f92e215d1 100644 (file)
@@ -141,7 +141,7 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       sysfb_apply_efi_quirks(pd);
+       sysfb_set_efifb_fwnode(pd);
 
        ret = platform_device_add_resources(pd, &res, 1);
        if (ret)
index acd83d29c8667d86b7d854e8f240a16123afa21f..ce86a18503054110062a315c2449c6ce57bad26a 100644 (file)
@@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id)
        }
 
        /* Add new entry if not present */
-       feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
+       feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
        if (!feature_data)
                return -ENOMEM;
 
index d8a421ce26a83e1b9f98dfbc37bd4135cbf34497..31ae0adbb295ab190bd69ef37ffbb02062536d1a 100644 (file)
@@ -536,6 +536,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       if (acpi_quirk_skip_gpio_event_handlers())
+               return;
+
        acpi_walk_resources(handle, METHOD_NAME__AEI,
                            acpi_gpiochip_alloc_event, acpi_gpio);
 
index 164141bc8b4ad1e145ead120d3094ea02efa40bb..39018f784f9c01d430472a7541c03ec645d35ce5 100644 (file)
@@ -1272,6 +1272,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+bool amdgpu_device_aspm_support_quirk(void);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes);
@@ -1391,10 +1392,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 
 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 void amdgpu_acpi_detect(void);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline void amdgpu_acpi_detect(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1405,11 +1408,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
 #endif
 
index d4196fcb85a08a364a4a7b235c30a62dde7fa8f8..60b1857f469ebb4effb70e06d194ebfea16305e9 100644 (file)
@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
        return true;
 }
 
+
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+       if (adev->flags & AMD_IS_APU)
+               return false;
+
+       if (amdgpu_sriov_vf(adev))
+               return false;
+
+#if IS_ENABLED(CONFIG_SUSPEND)
+       return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+#else
+       return true;
+#endif
+}
+
 /*
  * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
  *
@@ -1042,24 +1065,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
                (pm_suspend_target_state == PM_SUSPEND_MEM);
 }
 
-/**
- * amdgpu_acpi_should_gpu_reset
- *
- * @adev: amdgpu_device_pointer
- *
- * returns true if should reset GPU, false if not
- */
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
-{
-       if (adev->flags & AMD_IS_APU)
-               return false;
-
-       if (amdgpu_sriov_vf(adev))
-               return false;
-
-       return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
-}
-
 /**
  * amdgpu_acpi_is_s0ix_active
  *
index c4a4e2fe66814ccf07a2edd13a00bd5bbf8bedd2..3d98fc2ad36b04eb0d44fbc0e897608e5122a8c7 100644 (file)
 
 #include <drm/drm_drv.h>
 
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -1356,6 +1360,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
        return pcie_aspm_enabled(adev->pdev);
 }
 
+bool amdgpu_device_aspm_support_quirk(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+       struct cpuinfo_x86 *c = &cpu_data(0);
+
+       return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+       return true;
+#endif
+}
+
 /* if we get transitioned to only one device, take VGA back */
 /**
  * amdgpu_device_vga_set_decode - enable/disable vga decode
@@ -4145,8 +4160,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
                DRM_WARN("smart shift update failed\n");
 
-       drm_kms_helper_poll_disable(dev);
-
        if (fbcon)
                drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
 
@@ -4243,8 +4256,6 @@ exit:
        if (fbcon)
                drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
 
-       drm_kms_helper_poll_enable(dev);
-
        amdgpu_ras_resume(adev);
 
        if (adev->mode_info.num_crtc) {
index 503f89a766c3774e4626b70f7ea49cacd7e31b2f..d60fe7eb5579aeae48961105b832e1a47fe1300c 100644 (file)
@@ -1618,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
        struct drm_connector_list_iter iter;
        int r;
 
+       drm_kms_helper_poll_disable(dev);
+
        /* turn off display hw */
        drm_modeset_lock_all(dev);
        drm_connector_list_iter_begin(dev, &iter);
@@ -1694,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
 
        drm_modeset_unlock_all(dev);
 
+       drm_kms_helper_poll_enable(dev);
+
        return 0;
 }
 
index f5ffca24def4000f42043d4c09e5f05fe86f8876..ba5def374368e578ba842f20bbeab6d38777f52e 100644 (file)
@@ -2467,7 +2467,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
        adev->in_s4 = false;
        if (r)
                return r;
-       return amdgpu_asic_reset(adev);
+
+       if (amdgpu_acpi_should_gpu_reset(adev))
+               return amdgpu_asic_reset(adev);
+       return 0;
 }
 
 static int amdgpu_pmops_thaw(struct device *dev)
index faff4a3f96e6e8911e573daf46b4b894103a76e3..f52d0ba91a770a4b437a1137aa88cf6a7113cf53 100644 (file)
@@ -678,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
                ptr = &ring->fence_drv.fences[i];
                old = rcu_dereference_protected(*ptr, 1);
                if (old && old->ops == &amdgpu_job_fence_ops) {
+                       struct amdgpu_job *job;
+
+                       /* For non-scheduler bad job, i.e. failed ib test, we need to signal
+                        * it right here or we won't be able to track them in fence_drv
+                        * and they will remain unsignaled during sa_bo free.
+                        */
+                       job = container_of(old, struct amdgpu_job, hw_fence);
+                       if (!job->base.s_fence && !dma_fence_is_signaled(old))
+                               dma_fence_signal(old);
                        RCU_INIT_POINTER(*ptr, NULL);
                        dma_fence_put(old);
                }
index 25217b05c0ea8dabd5c4ef4fd22885e875d2af2c..e7974de8b035d6f356db79549408e6fef9755364 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/dmi.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
 #include <drm/drm_drv.h>
@@ -114,6 +115,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
            (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                adev->vcn.indirect_sram = true;
 
+       /*
+        * Some Steam Deck's BIOS versions are incompatible with the
+        * indirect SRAM mode, leading to amdgpu being unable to get
+        * properly probed (and even potentially crashing the kernel).
+        * Hence, check for these versions here - notice this is
+        * restricted to Vangogh (Deck's APU).
+        */
+       if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
+               const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+               if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+                    !strncmp("F7A0114", bios_ver, 7))) {
+                       adev->vcn.indirect_sram = false;
+                       dev_info(adev->dev,
+                               "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+               }
+       }
+
        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
index b9e9480448afe9e56324daf866deb77eb2d34063..4f7bab52282ac1b6463fb2420315bbdf4b92b0ba 100644 (file)
@@ -124,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG {
        AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
        /* Indirect Reg Access enabled */
        AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+       /* AV1 Support MODE*/
+       AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
 };
 
 enum AMDGIM_REG_ACCESS_FLAG {
@@ -322,6 +324,8 @@ static inline bool is_virtual_machine(void)
        ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
 #define amdgpu_sriov_is_normal(adev) \
        ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
+       ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
index 6c97148ca0ed35dc9c66c828bd45a870281a5c44..24d42d24e6a01e033a84edbbfe561e0878dd6049 100644 (file)
@@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags {
                uint32_t mm_bw_management  : 1;
                uint32_t pp_one_vf_mode    : 1;
                uint32_t reg_indirect_acc  : 1;
-               uint32_t reserved          : 26;
+               uint32_t av1_support       : 1;
+               uint32_t reserved          : 25;
        } flags;
        uint32_t all;
 };
index 3bf697a80cf2fc30a4dcc49e154ee87b5ffe4370..ecf8ceb53311ac2d1dbb09b80244fef86dc3bca3 100644 (file)
@@ -1287,6 +1287,11 @@ static int gfx_v11_0_sw_init(void *handle)
                break;
        }
 
+       /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
+       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
+               amdgpu_sriov_is_pp_one_vf(adev))
+               adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
+
        /* EOP Event */
        r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
                              GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
@@ -4655,6 +4660,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
        return false;
 }
 
+static int gfx_v11_0_post_soft_reset(void *handle)
+{
+       /**
+        * GFX soft reset will impact MES, need resume MES when do GFX soft reset
+        */
+       return amdgpu_mes_resume((struct amdgpu_device *)handle);
+}
+
 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
        uint64_t clock;
@@ -6166,6 +6179,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
        .wait_for_idle = gfx_v11_0_wait_for_idle,
        .soft_reset = gfx_v11_0_soft_reset,
        .check_soft_reset = gfx_v11_0_check_soft_reset,
+       .post_soft_reset = gfx_v11_0_post_soft_reset,
        .set_clockgating_state = gfx_v11_0_set_clockgating_state,
        .set_powergating_state = gfx_v11_0_set_powergating_state,
        .get_clockgating_state = gfx_v11_0_get_clockgating_state,
index 855d390c41de159c85b3341289da14c7671931f4..ebe0e2d7dbd1b59d772ae52d641f43a9914921b3 100644 (file)
@@ -578,7 +578,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
 
 static void nv_program_aspm(struct amdgpu_device *adev)
 {
-       if (!amdgpu_device_should_use_aspm(adev))
+       if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
                return;
 
        if (!(adev->flags & AMD_IS_APU) &&
@@ -1055,8 +1055,8 @@ static int nv_common_late_init(void *handle)
                        amdgpu_virt_update_sriov_video_codec(adev,
                                                             sriov_sc_video_codecs_encode_array,
                                                             ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
-                                                            sriov_sc_video_codecs_decode_array_vcn1,
-                                                            ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+                                                            sriov_sc_video_codecs_decode_array_vcn0,
+                                                            ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0));
                }
        }
 
index 061793d390ccc5d38eb25e252bd92852366aaa57..c82b3a7ea5f0840c2f894ac6fc77b2c92f488840 100644 (file)
@@ -102,6 +102,59 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
        .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
 };
 
+/* SRIOV SOC21, not const since data is controlled by host */
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
+       .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+       .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
+       .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+       .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
+       .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
+       .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
+       .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
+       .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+};
+
 static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
                                 const struct amdgpu_video_codecs **codecs)
 {
@@ -112,16 +165,31 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
        case IP_VERSION(4, 0, 0):
        case IP_VERSION(4, 0, 2):
        case IP_VERSION(4, 0, 4):
-               if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
-                       if (encode)
-                               *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
-                       else
-                               *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+               if (amdgpu_sriov_vf(adev)) {
+                       if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+                       !amdgpu_sriov_is_av1_support(adev)) {
+                               if (encode)
+                                       *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
+                               else
+                                       *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
+                       } else {
+                               if (encode)
+                                       *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
+                               else
+                                       *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
+                       }
                } else {
-                       if (encode)
-                               *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
-                       else
-                               *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+                       if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
+                               if (encode)
+                                       *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+                               else
+                                       *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+                       } else {
+                               if (encode)
+                                       *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+                               else
+                                       *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+                       }
                }
                return 0;
        default:
@@ -730,8 +798,23 @@ static int soc21_common_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
                xgpu_nv_mailbox_get_irq(adev);
+               if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
+               !amdgpu_sriov_is_av1_support(adev)) {
+                       amdgpu_virt_update_sriov_video_codec(adev,
+                                                            sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
+                                                            ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
+                                                            sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
+                                                            ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
+               } else {
+                       amdgpu_virt_update_sriov_video_codec(adev,
+                                                            sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
+                                                            ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
+                                                            sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
+                                                            ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
+               }
+       }
 
        return 0;
 }
index 12ef782eb4785d38d830de14e2d70b58d749a80b..ceab8783575ca701d0df16d6413f8a9b770d5f9a 100644 (file)
 #include "mxgpu_vi.h"
 #include "amdgpu_dm.h"
 
-#if IS_ENABLED(CONFIG_X86)
-#include <asm/intel-family.h>
-#endif
-
 #define ixPCIE_LC_L1_PM_SUBSTATE       0x100100C6
 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK       0x00000001L
 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK   0x00000002L
@@ -1138,24 +1134,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
                WREG32_PCIE(ixPCIE_LC_CNTL, data);
 }
 
-static bool aspm_support_quirk_check(void)
-{
-#if IS_ENABLED(CONFIG_X86)
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
-#else
-       return true;
-#endif
-}
-
 static void vi_program_aspm(struct amdgpu_device *adev)
 {
        u32 data, data1, orig;
        bool bL1SS = false;
        bool bClkReqSupport = true;
 
-       if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
+       if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
                return;
 
        if (adev->flags & AMD_IS_APU ||
index a0e30f21e12e70a763b0a773895b20daee59b13a..de310ed367ca1dda7c41a437476cf1b70a345677 100644 (file)
@@ -1312,14 +1312,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
                args->n_success = i+1;
        }
 
-       mutex_unlock(&p->mutex);
-
        err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
        if (err) {
                pr_debug("Sync memory failed, wait interrupted by user signal\n");
                goto sync_memory_failed;
        }
 
+       mutex_unlock(&p->mutex);
+
        /* Flush TLBs after waiting for the page table updates to complete */
        for (i = 0; i < args->n_devices; i++) {
                peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1335,9 +1335,9 @@ get_process_device_data_failed:
 bind_process_to_device_failed:
 get_mem_obj_from_handle_failed:
 map_memory_to_gpu_failed:
+sync_memory_failed:
        mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
        kfree(devices_arr);
 
        return err;
@@ -1351,6 +1351,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
        void *mem;
        long err = 0;
        uint32_t *devices_arr = NULL, i;
+       bool flush_tlb;
 
        if (!args->n_devices) {
                pr_debug("Device IDs array empty\n");
@@ -1403,16 +1404,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
                }
                args->n_success = i+1;
        }
-       mutex_unlock(&p->mutex);
 
-       if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+       flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+       if (flush_tlb) {
                err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
                                (struct kgd_mem *) mem, true);
                if (err) {
                        pr_debug("Sync memory failed, wait interrupted by user signal\n");
                        goto sync_memory_failed;
                }
+       }
+       mutex_unlock(&p->mutex);
 
+       if (flush_tlb) {
                /* Flush TLBs after waiting for the page table updates to complete */
                for (i = 0; i < args->n_devices; i++) {
                        peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1428,9 +1432,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 bind_process_to_device_failed:
 get_mem_obj_from_handle_failed:
 unmap_memory_from_gpu_failed:
+sync_memory_failed:
        mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
        kfree(devices_arr);
        return err;
 }
index 3de7f616a001cf6a8be622f87db2bb286c7e1c3d..ec70a1658dc3871c1dd960af26586a76dbb847a8 100644 (file)
@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
                                unsigned int chunk_size);
 static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
 
+static int kfd_resume_iommu(struct kfd_dev *kfd);
 static int kfd_resume(struct kfd_dev *kfd);
 
 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
@@ -624,7 +625,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
        svm_migrate_init(kfd->adev);
 
-       if (kgd2kfd_resume_iommu(kfd))
+       if (kfd_resume_iommu(kfd))
                goto device_iommu_error;
 
        if (kfd_resume(kfd))
@@ -772,6 +773,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 }
 
 int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+       if (!kfd->init_complete)
+               return 0;
+
+       return kfd_resume_iommu(kfd);
+}
+
+static int kfd_resume_iommu(struct kfd_dev *kfd)
 {
        int err = 0;
 
index de8ce72344fc57f14d3198256f458cfe8891797e..54933903bcb8a3a2547dc31d8354c7b1a31b3850 100644 (file)
@@ -289,7 +289,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
 static int
 svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                         struct migrate_vma *migrate, struct dma_fence **mfence,
-                        dma_addr_t *scratch)
+                        dma_addr_t *scratch, uint64_t ttm_res_offset)
 {
        uint64_t npages = migrate->npages;
        struct device *dev = adev->dev;
@@ -299,19 +299,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
        uint64_t i, j;
        int r;
 
-       pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
-                prange->last);
+       pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
+                prange->last, ttm_res_offset);
 
        src = scratch;
        dst = (uint64_t *)(scratch + npages);
 
-       r = svm_range_vram_node_new(adev, prange, true);
-       if (r) {
-               dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
-               goto out;
-       }
-
-       amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
+       amdgpu_res_first(prange->ttm_res, ttm_res_offset,
                         npages << PAGE_SHIFT, &cursor);
        for (i = j = 0; i < npages; i++) {
                struct page *spage;
@@ -391,14 +385,14 @@ out_free_vram_pages:
                migrate->dst[i + 3] = 0;
        }
 #endif
-out:
+
        return r;
 }
 
 static long
 svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                        struct vm_area_struct *vma, uint64_t start,
-                       uint64_t end, uint32_t trigger)
+                       uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
 {
        struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
        uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -451,7 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
        else
                pr_debug("0x%lx pages migrated\n", cpages);
 
-       r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+       r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
        migrate_vma_pages(&migrate);
 
        pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@@ -499,6 +493,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
        unsigned long addr, start, end;
        struct vm_area_struct *vma;
        struct amdgpu_device *adev;
+       uint64_t ttm_res_offset;
        unsigned long cpages = 0;
        long r = 0;
 
@@ -520,6 +515,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
        start = prange->start << PAGE_SHIFT;
        end = (prange->last + 1) << PAGE_SHIFT;
 
+       r = svm_range_vram_node_new(adev, prange, true);
+       if (r) {
+               dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+               return r;
+       }
+       ttm_res_offset = prange->offset << PAGE_SHIFT;
+
        for (addr = start; addr < end;) {
                unsigned long next;
 
@@ -528,18 +530,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
                        break;
 
                next = min(vma->vm_end, end);
-               r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
+               r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
                if (r < 0) {
                        pr_debug("failed %ld to migrate\n", r);
                        break;
                } else {
                        cpages += r;
                }
+               ttm_res_offset += next - addr;
                addr = next;
        }
 
        if (cpages)
                prange->actual_loc = best_loc;
+       else
+               svm_range_vram_node_free(prange);
 
        return r < 0 ? r : 0;
 }
index 09b966dc376818c08e53020dbea1b0f695f68043..aee2212e52f69aea69b6cd06e476c391713a8a1c 100644 (file)
@@ -77,6 +77,7 @@ err_ioctl:
 
 static void kfd_exit(void)
 {
+       kfd_cleanup_processes();
        kfd_debugfs_fini();
        kfd_process_destroy_wq();
        kfd_procfs_shutdown();
index bfa30d12406b35c7c174a6baa550678e4a31c943..7e4d992e48b3c99495763fbf36508b46fb1e7f78 100644 (file)
@@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
 
 int kfd_process_create_wq(void);
 void kfd_process_destroy_wq(void);
+void kfd_cleanup_processes(void);
 struct kfd_process *kfd_create_process(struct file *filep);
 struct kfd_process *kfd_get_process(const struct task_struct *task);
 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
index 7acd55a814b2f32999ba4aa9b68db91da30ccfc7..4208e0f01064dd492e7aa85e28c99622bbee90b4 100644 (file)
@@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
        kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
 }
 
+static void kfd_process_notifier_release_internal(struct kfd_process *p)
+{
+       cancel_delayed_work_sync(&p->eviction_work);
+       cancel_delayed_work_sync(&p->restore_work);
+
+       /* Indicate to other users that MM is no longer valid */
+       p->mm = NULL;
+
+       mmu_notifier_put(&p->mmu_notifier);
+}
+
 static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
 {
@@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
                return;
 
        mutex_lock(&kfd_processes_mutex);
+       /*
+        * Do early return if table is empty.
+        *
+        * This could potentially happen if this function is called concurrently
+        * by mmu_notifier and by kfd_cleanup_processes.
+        *
+        */
+       if (hash_empty(kfd_processes_table)) {
+               mutex_unlock(&kfd_processes_mutex);
+               return;
+       }
        hash_del_rcu(&p->kfd_processes);
        mutex_unlock(&kfd_processes_mutex);
        synchronize_srcu(&kfd_processes_srcu);
 
-       cancel_delayed_work_sync(&p->eviction_work);
-       cancel_delayed_work_sync(&p->restore_work);
-
-       /* Indicate to other users that MM is no longer valid */
-       p->mm = NULL;
-
-       mmu_notifier_put(&p->mmu_notifier);
+       kfd_process_notifier_release_internal(p);
 }
 
 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
        .free_notifier = kfd_process_free_notifier,
 };
 
+/*
+ * This code handles the case when driver is being unloaded before all
+ * mm_struct are released.  We need to safely free the kfd_process and
+ * avoid race conditions with mmu_notifier that might try to free them.
+ *
+ */
+void kfd_cleanup_processes(void)
+{
+       struct kfd_process *p;
+       struct hlist_node *p_temp;
+       unsigned int temp;
+       HLIST_HEAD(cleanup_list);
+
+       /*
+        * Move all remaining kfd_process from the process table to a
+        * temp list for processing. Once done, callback from mmu_notifier
+        * release will not see the kfd_process in the table and do early return,
+        * avoiding double free issues.
+        */
+       mutex_lock(&kfd_processes_mutex);
+       hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
+               hash_del_rcu(&p->kfd_processes);
+               synchronize_srcu(&kfd_processes_srcu);
+               hlist_add_head(&p->kfd_processes, &cleanup_list);
+       }
+       mutex_unlock(&kfd_processes_mutex);
+
+       hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
+               kfd_process_notifier_release_internal(p);
+
+       /*
+        * Ensures that all outstanding free_notifier get called, triggering
+        * the release of the kfd_process struct.
+        */
+       mmu_notifier_synchronize();
+}
+
 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 {
        unsigned long  offset;
index 5137476ec18e67d521d0ef590eb6cd8f4de94fd1..4236539d9f932e34c62357767972c53c9498f92a 100644 (file)
@@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
        return 0;
 
 cleanup:
-       if (dev->shared_resources.enable_mes)
-               uninit_queue(*q);
+       uninit_queue(*q);
+       *q = NULL;
        return retval;
 }
 
index 009ef917dad47b3eb1df6c10d82663f7ccac35a6..a01fd41643fc2abd4cc2a058c82498503790e0fe 100644 (file)
@@ -5105,9 +5105,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 
                for (; flip_addrs->dirty_rect_count < num_clips; clips++)
                        fill_dc_dirty_rect(new_plane_state->plane,
-                                          &dirty_rects[i], clips->x1,
-                                          clips->y1, clips->x2 - clips->x1,
-                                          clips->y2 - clips->y1,
+                                          &dirty_rects[flip_addrs->dirty_rect_count],
+                                          clips->x1, clips->y1,
+                                          clips->x2 - clips->x1, clips->y2 - clips->y1,
                                           &flip_addrs->dirty_rect_count,
                                           false);
                return;
@@ -7244,7 +7244,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        if (!aconnector->mst_root)
                drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 
-       /* This defaults to the max in the range, but we want 8bpc for non-edp. */
        aconnector->base.state->max_bpc = 16;
        aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
 
index 8e572f07ec476030258036f6538aba733cd1f1d4..4abfd2c9679f456c15c1cb2cb2e9158eb811ba34 100644 (file)
@@ -561,7 +561,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
        link->dp.mst_enabled = config->mst_enabled;
        link->dp.usb4_enabled = config->usb4_enabled;
        display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
-       link->adjust.auth_delay = 0;
+       link->adjust.auth_delay = 2;
        link->adjust.hdcp1.disable = 0;
        conn_state = aconnector->base.state;
 
index 3b4d4d68359bb8f8165ea6b126a3fa6969a6246e..df787fcf8e86e06e17646d09f26cdb938d51a318 100644 (file)
@@ -998,8 +998,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,
                        dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
 
        dcn20_prepare_bandwidth(dc, context);
-
-       dc_dmub_srv_p_state_delegate(dc,
-               context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
 }
 
index e4472c6be6c3231a29aa161fcf948f627eb1201f..3fb4bcc343531b6271c4dae8a48802c3426726bf 100644 (file)
@@ -271,8 +271,7 @@ static void dccg32_set_dpstreamclk(
        dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
 
        /* enabled to select one of the DTBCLKs for pipe */
-       switch (otg_inst)
-       {
+       switch (dp_hpo_inst) {
        case 0:
                REG_UPDATE_2(DPSTREAMCLK_CNTL,
                             DPSTREAMCLK0_EN,
index 16f892125b6fac12680d681efa99810bd6847f67..9d14045cccd63e1e2e56307b3e273420d494442a 100644 (file)
@@ -1104,7 +1104,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
                        *k2_div = PIXEL_RATE_DIV_BY_2;
                else
                        *k2_div = PIXEL_RATE_DIV_BY_4;
-       } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+       } else if (dc_is_dp_signal(stream->signal)) {
                if (two_pix_per_container) {
                        *k1_div = PIXEL_RATE_DIV_BY_1;
                        *k2_div = PIXEL_RATE_DIV_BY_2;
index 74e50c09bb62f9156a8436d4b86025e5b9b5b8a9..4b7abb4af623599072ec0ede6e300ec82b9c30d8 100644 (file)
@@ -1915,6 +1915,7 @@ int dcn32_populate_dml_pipes_from_context(
        bool subvp_in_use = false;
        uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
        struct dc_crtc_timing *timing;
+       bool vsr_odm_support = false;
 
        dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
 
@@ -1932,12 +1933,15 @@ int dcn32_populate_dml_pipes_from_context(
                timing = &pipe->stream->timing;
 
                pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+               vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
+                               res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
                if (context->stream_count == 1 &&
                                context->stream_status[0].plane_count == 1 &&
                                !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
                                is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
                                pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
-                               dc->debug.enable_single_display_2to1_odm_policy) {
+                               dc->debug.enable_single_display_2to1_odm_policy &&
+                               !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
                        pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
                }
                pipe_cnt++;
@@ -2182,6 +2186,7 @@ static bool dcn32_resource_construct(
        dc->caps.edp_dsc_support = true;
        dc->caps.extended_aux_timeout_support = true;
        dc->caps.dmcub_support = true;
+       dc->caps.seamless_odm = true;
 
        /* Color pipeline capabilities */
        dc->caps.color.dpp.dcn_arch = 1;
index 38216c789d7771459162272aa25148497e8a6ea8..f70025ef7b69edfcef5a28e45f384a686cb009a5 100644 (file)
@@ -855,6 +855,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
        struct dc_sink *prev_sink = NULL;
        struct dpcd_caps prev_dpcd_caps;
        enum dc_connection_type new_connection_type = dc_connection_none;
+       enum dc_connection_type pre_connection_type = link->type;
        const uint32_t post_oui_delay = 30; // 30ms
 
        DC_LOGGER_INIT(link->ctx->logger);
@@ -957,6 +958,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
                        }
 
                        if (!detect_dp(link, &sink_caps, reason)) {
+                               link->type = pre_connection_type;
+
                                if (prev_sink)
                                        dc_sink_release(prev_sink);
                                return false;
@@ -1244,11 +1247,16 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
        bool is_delegated_to_mst_top_mgr = false;
        enum dc_connection_type pre_link_type = link->type;
 
+       DC_LOGGER_INIT(link->ctx->logger);
+
        is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
 
        if (is_local_sink_detect_success && link->local_sink)
                verify_link_capability(link, link->local_sink, reason);
 
+       DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
+                               link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
+
        if (is_local_sink_detect_success && link->local_sink &&
                        dc_is_dp_signal(link->local_sink->sink_signal) &&
                        link->dpcd_caps.is_mst_capable)
index f77401709d83cccad7e34d4fab6e5a70e81c3af8..2162ecd1057d1c40286644ac4774c0bfb30dcbf9 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if
 // any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
+#define PMFW_DRIVER_IF_VERSION 8
 
 typedef struct {
   int32_t value;
@@ -198,7 +198,7 @@ typedef struct {
   uint16_t SkinTemp;
   uint16_t DeviceState;
   uint16_t CurTemp;                     //[centi-Celsius]
-  uint16_t spare2;
+  uint16_t FilterAlphaValue;
 
   uint16_t AverageGfxclkFrequency;
   uint16_t AverageFclkFrequency;
index 1c0ae2cb757b8d30fc4974119300e558a521c1b1..f085cb97a62060ab730d44aaa52f7fcde9080566 100644 (file)
@@ -29,7 +29,7 @@
 #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
index 697e98a0a20ab9c5afc74305e2eb5b66e6e34a1e..75f18681e984c33a5c7cf83d7a22aa92b794f3c9 100644 (file)
@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
                (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
        OverDriveTable_t *user_od_table =
                (OverDriveTable_t *)smu->smu_table.user_overdrive_table;
+       OverDriveTable_t user_od_table_bak;
        int ret = 0;
 
-       /*
-        * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
-        *   - either they already have the default OD settings got during cold bootup
-        *   - or they have some user customized OD settings which cannot be overwritten
-        */
-       if (smu->adev->in_suspend)
-               return 0;
-
        ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
                                   0, (void *)boot_od_table, false);
        if (ret) {
@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
        sienna_cichlid_dump_od_table(smu, boot_od_table);
 
        memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
-       memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+
+       /*
+        * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
+        * but we have to preserve user defined values in "user_od_table".
+        */
+       if (!smu->adev->in_suspend) {
+               memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+               smu->user_dpm_profile.user_od = false;
+       } else if (smu->user_dpm_profile.user_od) {
+               memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
+               memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
+               user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
+               user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
+               user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
+               user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
+               user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
+       }
 
        return 0;
 }
@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
        return ret;
 }
 
+static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
+{
+       struct smu_table_context *table_context = &smu->smu_table;
+       OverDriveTable_t *od_table = table_context->overdrive_table;
+       OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
+       int res;
+
+       res = smu_v11_0_restore_user_od_settings(smu);
+       if (res == 0)
+               memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
+
+       return res;
+}
+
 static int sienna_cichlid_run_btc(struct smu_context *smu)
 {
        int res;
@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
        .set_default_od_settings = sienna_cichlid_set_default_od_settings,
        .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
-       .restore_user_od_settings = smu_v11_0_restore_user_od_settings,
+       .restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
        .run_btc = sienna_cichlid_run_btc,
        .set_power_source = smu_v11_0_set_power_source,
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
index 2019a8167d693d516b118ae2d1d37ace24276067..b40baced1331666372e98115091e2d08b9ba5a2e 100644 (file)
@@ -676,8 +676,8 @@ static int lt8912_parse_dt(struct lt8912 *lt)
 
        lt->hdmi_port = of_drm_find_bridge(port_node);
        if (!lt->hdmi_port) {
-               dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
-               ret = -ENODEV;
+               ret = -EPROBE_DEFER;
+               dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
                goto err_free_host_node;
        }
 
index 3d0a4da661bc985c5c2b144fecd2d2de80677f26..261a62e1593416e5228b4a859788e798b1e4cf59 100644 (file)
@@ -2796,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
         * the EDID then we'll just return 0.
         */
 
-       base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+       base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
        if (!base_block)
                return 0;
 
index 7a3cb08dc942e1703b0ab7e2767328d501e44821..a5d392f7e11f6a1bb7ef0578de0f6a6f39e406fd 100644 (file)
@@ -1388,10 +1388,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
  *
  * @lru: The LRU to scan
  * @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim, should be initialized by caller
  * @shrink: Callback to try to shrink/reclaim the object.
  */
 unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+                unsigned int nr_to_scan,
+                unsigned long *remaining,
                 bool (*shrink)(struct drm_gem_object *obj))
 {
        struct drm_gem_lru still_in_lru;
@@ -1430,8 +1433,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
                 * hit shrinker in response to trying to get backing pages
                 * for this obj (ie. while it's lock is already held)
                 */
-               if (!dma_resv_trylock(obj->resv))
+               if (!dma_resv_trylock(obj->resv)) {
+                       *remaining += obj->size >> PAGE_SHIFT;
                        goto tail;
+               }
 
                if (shrink(obj)) {
                        freed += obj->size >> PAGE_SHIFT;
index 75185a960fc408f1042999e4c9b6c04baef6831b..2b2163c8138ef488563c996ff0334cf80add92bb 100644 (file)
@@ -619,11 +619,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
        int ret;
 
        if (obj->import_attach) {
-               /* Drop the reference drm_gem_mmap_obj() acquired.*/
-               drm_gem_object_put(obj);
                vma->vm_private_data = NULL;
+               ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+               /* Drop the reference drm_gem_mmap_obj() acquired.*/
+               if (!ret)
+                       drm_gem_object_put(obj);
 
-               return dma_buf_mmap(obj->dma_buf, vma, 0);
+               return ret;
        }
 
        ret = drm_gem_shmem_get_pages(shmem);
index 5522d610c5cfdea29aeb587f93017a55d4224580..b1a38e6ce2f8fad28bbef491a02c29b7257beaa1 100644 (file)
@@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
                },
                .driver_data = (void *)&lcd1200x1920_rightside_up,
-       }, {    /* Lenovo Yoga Book X90F / X91F / X91L */
+       }, {    /* Lenovo Yoga Book X90F / X90L */
                .matches = {
-                 /* Non exact match to match all versions */
-                 DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+               },
+               .driver_data = (void *)&lcd1200x1920_rightside_up,
+       }, {    /* Lenovo Yoga Book X91F / X91L */
+               .matches = {
+                 /* Non exact match to match F + L versions */
+                 DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
                },
                .driver_data = (void *)&lcd1200x1920_rightside_up,
        }, {    /* Lenovo Yoga Tablet 2 830F / 830L */
index 82be0fbe99342bf901b9473f5f95afe4fedd8b2c..d5b5d40ed817f264fcd0c0219d22bd931caca2f5 100644 (file)
@@ -683,6 +683,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
         */
        intel_vrr_send_push(new_crtc_state);
 
+       /*
+        * Seamless M/N update may need to update frame timings.
+        *
+        * FIXME Should be synchronized with the start of vblank somehow...
+        */
+       if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+               intel_crtc_update_active_timings(new_crtc_state);
+
        local_irq_enable();
 
        if (intel_vgpu_active(dev_priv))
index d3994e2a7d6362e77dd50a67f6f2ad070a328f2f..208b1b5b15dd4019eb0216293675f2c54326715b 100644 (file)
@@ -5145,6 +5145,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
         * only fields that are know to not cause problems are preserved. */
 
        saved_state->uapi = crtc_state->uapi;
+       saved_state->inherited = crtc_state->inherited;
        saved_state->scaler_state = crtc_state->scaler_state;
        saved_state->shared_dpll = crtc_state->shared_dpll;
        saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
index 54c517ca9632fb1be3150c40a3935cbbbf63956c..582234f0c49ace7bb3af97a86565c130a28a52eb 100644 (file)
@@ -1631,6 +1631,8 @@ struct intel_psr {
        bool psr2_sel_fetch_cff_enabled;
        bool req_psr2_sdp_prior_scanline;
        u8 sink_sync_latency;
+       u8 io_wake_lines;
+       u8 fast_wake_lines;
        ktime_t last_entry_attempt;
        ktime_t last_exit;
        bool sink_not_reliable;
index 257aa2b7cf2045d116a98088f8fa6db5a36935dd..3485d5e6dd3c75d21d4dfe9abcf4d7d369f67260 100644 (file)
@@ -384,15 +384,12 @@ static void disable_all_event_handlers(struct drm_i915_private *i915)
        }
 }
 
-static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
 {
        enum pipe pipe;
 
-       if (DISPLAY_VER(i915) < 13)
-               return;
-
        /*
-        * Wa_16015201720:adl-p,dg2, mtl
+        * Wa_16015201720:adl-p,dg2
         * The WA requires clock gating to be disabled all the time
         * for pipe A and B.
         * For pipe C and D clock gating needs to be disabled only
@@ -408,6 +405,25 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
                                     PIPEDMC_GATING_DIS, 0);
 }
 
+static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
+{
+       /*
+        * Wa_16015201720
+        * The WA requires clock gating to be disabled all the time
+        * for pipe A and B.
+        */
+       intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
+                    MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+       if (DISPLAY_VER(i915) >= 14 && enable)
+               mtl_pipedmc_clock_gating_wa(i915);
+       else if (DISPLAY_VER(i915) == 13)
+               adlp_pipedmc_clock_gating_wa(i915, enable);
+}
+
 void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
 {
        if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
index 054a009e800d77e39fdbdd7d94e48f6266779348..2106b3de225a0663675acc2685ff4e4e1cb566d3 100644 (file)
@@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
        return 0;
 }
 
+static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state)
+{
+       const struct intel_digital_connector_state *intel_conn_state =
+               to_intel_digital_connector_state(conn_state);
+       struct intel_connector *connector =
+               to_intel_connector(conn_state->connector);
+
+       if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+               return connector->port->has_audio;
+       else
+               return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
 static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
                                       struct intel_crtc_state *pipe_config,
                                       struct drm_connector_state *conn_state)
@@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
        struct intel_dp *intel_dp = &intel_mst->primary->dp;
-       struct intel_connector *connector =
-               to_intel_connector(conn_state->connector);
-       struct intel_digital_connector_state *intel_conn_state =
-               to_intel_digital_connector_state(conn_state);
        const struct drm_display_mode *adjusted_mode =
                &pipe_config->hw.adjusted_mode;
        struct link_config_limits limits;
@@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->has_pch_encoder = false;
 
-       if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
-               pipe_config->has_audio = connector->port->has_audio;
-       else
-               pipe_config->has_audio =
-                       intel_conn_state->force_audio == HDMI_AUDIO_ON;
+       pipe_config->has_audio =
+               intel_dp_mst_has_audio(conn_state) &&
+               intel_audio_compute_config(encoder, pipe_config, conn_state);
 
        /*
         * for MST we always configure max link bw - the spec doesn't
index f76b06293eb94293f222fa64cd3c67f664302862..38825b30db16cc3a87ce84756ec4e089b3c662f1 100644 (file)
@@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        bool prealloc = false;
        void __iomem *vaddr;
        struct drm_i915_gem_object *obj;
+       struct i915_gem_ww_ctx ww;
        int ret;
 
        mutex_lock(&ifbdev->hpd_lock);
@@ -283,13 +284,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
                info->fix.smem_len = vma->size;
        }
 
-       vaddr = i915_vma_pin_iomap(vma);
-       if (IS_ERR(vaddr)) {
-               drm_err(&dev_priv->drm,
-                       "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
-               ret = PTR_ERR(vaddr);
-               goto out_unpin;
+       for_i915_gem_ww(&ww, ret, false) {
+               ret = i915_gem_object_lock(vma->obj, &ww);
+
+               if (ret)
+                       continue;
+
+               vaddr = i915_vma_pin_iomap(vma);
+               if (IS_ERR(vaddr)) {
+                       drm_err(&dev_priv->drm,
+                               "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+                       ret = PTR_ERR(vaddr);
+                       continue;
+               }
        }
+
+       if (ret)
+               goto out_unpin;
+
        info->screen_base = vaddr;
        info->screen_size = vma->size;
 
index 7a72e15e68369f2e2bdbf58afabd48ab06c5de68..9f1a0bebae24086964adc83bbacee3668a174105 100644 (file)
@@ -542,6 +542,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
        val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
        val |= intel_psr2_get_tp_time(intel_dp);
 
+       if (DISPLAY_VER(dev_priv) >= 12) {
+               if (intel_dp->psr.io_wake_lines < 9 &&
+                   intel_dp->psr.fast_wake_lines < 9)
+                       val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+               else
+                       val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+       }
+
        /* Wa_22012278275:adl-p */
        if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
                static const u8 map[] = {
@@ -558,31 +566,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
                 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
                 * comments bellow for more information
                 */
-               u32 tmp, lines = 7;
-
-               val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+               u32 tmp;
 
-               tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+               tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
                tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
                val |= tmp;
 
-               tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+               tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
                tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
                val |= tmp;
        } else if (DISPLAY_VER(dev_priv) >= 12) {
-               /*
-                * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
-                * values from BSpec. In order to setting an optimal power
-                * consumption, lower than 4k resolution mode needs to decrease
-                * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
-                * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
-                */
-               val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
-               val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
-               val |= TGL_EDP_PSR2_FAST_WAKE(7);
+               val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+               val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
        } else if (DISPLAY_VER(dev_priv) >= 9) {
-               val |= EDP_PSR2_IO_BUFFER_WAKE(7);
-               val |= EDP_PSR2_FAST_WAKE(7);
+               val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+               val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
        }
 
        if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -842,6 +840,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
        return true;
 }
 
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+                                    struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+       u8 max_wake_lines;
+
+       if (DISPLAY_VER(i915) >= 12) {
+               io_wake_time = 42;
+               /*
+                * According to Bspec 42us should be enough, but testing
+                * shows it is not -> use 45us.
+                */
+               fast_wake_time = 45;
+               max_wake_lines = 12;
+       } else {
+               io_wake_time = 50;
+               fast_wake_time = 32;
+               max_wake_lines = 8;
+       }
+
+       io_wake_lines = intel_usecs_to_scanlines(
+               &crtc_state->uapi.adjusted_mode, io_wake_time);
+       fast_wake_lines = intel_usecs_to_scanlines(
+               &crtc_state->uapi.adjusted_mode, fast_wake_time);
+
+       if (io_wake_lines > max_wake_lines ||
+           fast_wake_lines > max_wake_lines)
+               return false;
+
+       if (i915->params.psr_safest_params)
+               io_wake_lines = fast_wake_lines = max_wake_lines;
+
+       /* According to Bspec, the lower limit should be set to 7 lines. */
+       intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+       intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+       return true;
+}
+
 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                                    struct intel_crtc_state *crtc_state)
 {
@@ -936,6 +974,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
        }
 
+       if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "PSR2 not enabled, Unable to use long enough wake times\n");
+               return false;
+       }
+
        if (HAS_PSR2_SEL_FETCH(dev_priv)) {
                if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
                    !HAS_PSR_HW_TRACKING(dev_priv)) {
index c65c771f5c461f018bcc4620809782571109eea0..1cfb94b5cedbdfa757f0a16e3f25f73e5d28e37c 100644 (file)
@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
                REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
 };
 
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+       .clock = 267300,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
 static const struct intel_mpllb_state dg2_hdmi_268500 = {
        .clock = 268500,
        .ref_control =
@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
                REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
 };
 
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+       .clock = 319890,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
 static const struct intel_mpllb_state dg2_hdmi_497750 = {
        .clock = 497750,
        .ref_control =
@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
        &dg2_hdmi_209800,
        &dg2_hdmi_241500,
        &dg2_hdmi_262750,
+       &dg2_hdmi_267300,
        &dg2_hdmi_268500,
        &dg2_hdmi_296703,
+       &dg2_hdmi_319890,
        &dg2_hdmi_497750,
        &dg2_hdmi_592000,
        &dg2_hdmi_593407,
index f0dbfc434e077357729e3e3f3f9c9ac50d9e5125..40d357cf8b042f835f9c96943d3b7b0cc5b068d9 100644 (file)
@@ -737,12 +737,12 @@ int intel_gt_init(struct intel_gt *gt)
        if (err)
                goto err_gt;
 
-       intel_uc_init_late(&gt->uc);
-
        err = i915_inject_probe_error(gt->i915, -EIO);
        if (err)
                goto err_gt;
 
+       intel_uc_init_late(&gt->uc);
+
        intel_migrate_init(&gt->migrate, gt);
 
        goto out_fw;
index cef3d6f5c34e0130b4943d48a456c75da31b275e..56b993f6e7dc9337aa98d24b08748606d9e8a922 100644 (file)
 #include "intel_rc6.h"
 #include "intel_rps.h"
 #include "intel_wakeref.h"
-#include "intel_pcode.h"
 #include "pxp/intel_pxp_pm.h"
 
 #define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
 
-static void mtl_media_busy(struct intel_gt *gt)
-{
-       /* Wa_14017073508: mtl */
-       if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
-           gt->type == GT_MEDIA)
-               snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
-                                 PCODE_MBOX_GT_STATE_MEDIA_BUSY,
-                                 PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
-static void mtl_media_idle(struct intel_gt *gt)
-{
-       /* Wa_14017073508: mtl */
-       if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
-           gt->type == GT_MEDIA)
-               snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
-                                 PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
-                                 PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
-}
-
 static void user_forcewake(struct intel_gt *gt, bool suspend)
 {
        int count = atomic_read(&gt->user_wakeref);
@@ -93,9 +72,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
 
        GT_TRACE(gt, "\n");
 
-       /* Wa_14017073508: mtl */
-       mtl_media_busy(gt);
-
        /*
         * It seems that the DMC likes to transition between the DC states a lot
         * when there are no connected displays (no active power domains) during
@@ -145,9 +121,6 @@ static int __gt_park(struct intel_wakeref *wf)
        GEM_BUG_ON(!wakeref);
        intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
-       /* Wa_14017073508: mtl */
-       mtl_media_idle(gt);
-
        return 0;
 }
 
index 83df4cd5e06cb929f09d79a6a3f14bfb373e38c7..80dbbef86b1dbf2313311bbe82df2a651179120a 100644 (file)
@@ -580,7 +580,7 @@ static bool perf_limit_reasons_eval(void *data)
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
-                       perf_limit_reasons_clear, "%llu\n");
+                       perf_limit_reasons_clear, "0x%llx\n");
 
 void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
 {
index 5c91622dfca420bc37c7c200b9e20927a14ca720..f4150f61f39c0be7cae48e67fc3430a7640ce0c1 100644 (file)
@@ -486,6 +486,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
 static bool rc6_supported(struct intel_rc6 *rc6)
 {
        struct drm_i915_private *i915 = rc6_to_i915(rc6);
+       struct intel_gt *gt = rc6_to_gt(rc6);
 
        if (!HAS_RC6(i915))
                return false;
@@ -502,6 +503,13 @@ static bool rc6_supported(struct intel_rc6 *rc6)
                return false;
        }
 
+       if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
+           gt->type == GT_MEDIA) {
+               drm_notice(&i915->drm,
+                          "Media RC6 disabled on A step\n");
+               return false;
+       }
+
        return true;
 }
 
index aa87d3832d60d92539413d6cc5f840873c65696a..d7e8c374f153e01ed26d46e3bdb3bd90a72d0651 100644 (file)
@@ -27,7 +27,7 @@ struct drm_printer;
  * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
  * I915_MAX_SS_FUSE_BITS value below).
  */
-#define GEN_MAX_SS_PER_HSW_SLICE       6
+#define GEN_MAX_SS_PER_HSW_SLICE       8
 
 /*
  * Maximum number of 32-bit registers used by hardware to express the
index fc3b994626a4fdc9f3d61fee47c23d304d3a5189..710999d7189ee3537f7436495e3ef70b7182fed4 100644 (file)
@@ -1571,6 +1571,27 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
 
 #endif //CONFIG_DRM_I915_CAPTURE_ERROR
 
+static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
+{
+       struct gcap_reg_list_info *reginfo;
+       struct guc_mmio_reg *regs;
+       i915_reg_t reg_ipehr = RING_IPEHR(0);
+       i915_reg_t reg_instdone = RING_INSTDONE(0);
+       int i;
+
+       if (!ee->guc_capture_node)
+               return;
+
+       reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
+       regs = reginfo->regs;
+       for (i = 0; i < reginfo->num_regs; i++) {
+               if (regs[i].offset == reg_ipehr.reg)
+                       ee->ipehr = regs[i].value;
+               else if (regs[i].offset == reg_instdone.reg)
+                       ee->instdone.instdone = regs[i].value;
+       }
+}
+
 void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
 {
        if (!ee || !ee->guc_capture_node)
@@ -1612,6 +1633,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
                        list_del(&n->link);
                        ee->guc_capture_node = n;
                        ee->guc_capture = guc->capture;
+                       guc_capture_find_ecode(ee);
                        return;
                }
        }
index b5855091cf6a92af7f6a14f0896d59f25a6fe8a4..8f8dd05835c5aaf766e0f70ccbba6a3c85904dda 100644 (file)
 
 static bool __guc_rc_supported(struct intel_guc *guc)
 {
-       struct intel_gt *gt = guc_to_gt(guc);
-
-       /*
-        * Wa_14017073508: mtl
-        * Do not enable gucrc to avoid additional interrupts which
-        * may disrupt pcode wa.
-        */
-       if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
-           gt->type == GT_MEDIA)
-               return false;
-
        /* GuC RC is unavailable for pre-Gen12 */
        return guc->submission_supported &&
-               GRAPHICS_VER(gt->i915) >= 12;
+               GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
 }
 
 static bool __guc_rc_selected(struct intel_guc *guc)
index 7412abf166a8c366e2711a120ca9b65acc5294e7..8ef93889061a6367e3558137a816efa0e32599da 100644 (file)
@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
 static void debug_active_activate(struct i915_active *ref)
 {
        lockdep_assert_held(&ref->tree_lock);
-       if (!atomic_read(&ref->count)) /* before the first inc */
-               debug_object_activate(ref, &active_debug_desc);
+       debug_object_activate(ref, &active_debug_desc);
 }
 
 static void debug_active_deactivate(struct i915_active *ref)
@@ -422,12 +421,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
         * we can use it to substitute for the pending idle-barrer
         * request that we want to emit on the kernel_context.
         */
-       __active_del_barrier(ref, node_from_active(active));
-       return true;
+       return __active_del_barrier(ref, node_from_active(active));
 }
 
 int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
 {
+       u64 idx = i915_request_timeline(rq)->fence_context;
        struct dma_fence *fence = &rq->fence;
        struct i915_active_fence *active;
        int err;
@@ -437,16 +436,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
        if (err)
                return err;
 
-       active = active_instance(ref, i915_request_timeline(rq)->fence_context);
-       if (!active) {
-               err = -ENOMEM;
-               goto out;
-       }
+       do {
+               active = active_instance(ref, idx);
+               if (!active) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+
+               if (replace_barrier(ref, active)) {
+                       RCU_INIT_POINTER(active->fence, NULL);
+                       atomic_dec(&ref->count);
+               }
+       } while (unlikely(is_barrier(active)));
 
-       if (replace_barrier(ref, active)) {
-               RCU_INIT_POINTER(active->fence, NULL);
-               atomic_dec(&ref->count);
-       }
        if (!__i915_active_fence_set(active, fence))
                __i915_active_acquire(ref);
 
index 3b2642397b8288f617fcfb291516409a3ae0b9ea..747b53b567a091fa2e8a65d1899d3c167e6167f3 100644 (file)
  * GEN9 clock gating regs
  */
 #define GEN9_CLKGATE_DIS_0             _MMIO(0x46530)
-#define   DARBF_GATING_DIS             (1 << 27)
-#define   PWM2_GATING_DIS              (1 << 14)
-#define   PWM1_GATING_DIS              (1 << 13)
+#define   DARBF_GATING_DIS             REG_BIT(27)
+#define   MTL_PIPEDMC_GATING_DIS_A     REG_BIT(15)
+#define   MTL_PIPEDMC_GATING_DIS_B     REG_BIT(14)
+#define   PWM2_GATING_DIS              REG_BIT(14)
+#define   PWM1_GATING_DIS              REG_BIT(13)
 
 #define GEN9_CLKGATE_DIS_3             _MMIO(0x46538)
 #define   TGL_VRH_GATING_DIS           REG_BIT(31)
 /*   XEHP_PCODE_FREQUENCY_CONFIG param2 */
 #define     PCODE_MBOX_DOMAIN_NONE             0x0
 #define     PCODE_MBOX_DOMAIN_MEDIAFF          0x3
-
-/* Wa_14017210380: mtl */
-#define   PCODE_MBOX_GT_STATE                  0x50
-/* sub-commands (param1) */
-#define     PCODE_MBOX_GT_STATE_MEDIA_BUSY     0x1
-#define     PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY 0x2
-/* param2 */
-#define     PCODE_MBOX_GT_STATE_DOMAIN_MEDIA   0x1
-
 #define GEN6_PCODE_DATA                                _MMIO(0x138128)
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT     16
index 79bfe3938d3c6abfd6829f6435ab98167dcdc490..7caf937c3c90d6b7d74456d7f276871ec8527547 100644 (file)
@@ -325,23 +325,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
        ret = meson_encoder_hdmi_init(priv);
        if (ret)
-               goto exit_afbcd;
+               goto unbind_all;
 
        ret = meson_plane_create(priv);
        if (ret)
-               goto exit_afbcd;
+               goto unbind_all;
 
        ret = meson_overlay_create(priv);
        if (ret)
-               goto exit_afbcd;
+               goto unbind_all;
 
        ret = meson_crtc_create(priv);
        if (ret)
-               goto exit_afbcd;
+               goto unbind_all;
 
        ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
        if (ret)
-               goto exit_afbcd;
+               goto unbind_all;
 
        drm_mode_config_reset(drm);
 
@@ -359,6 +359,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
 uninstall_irq:
        free_irq(priv->vsync_irq, drm);
+unbind_all:
+       if (has_components)
+               component_unbind_all(drm->dev, drm);
 exit_afbcd:
        if (priv->afbcd.ops)
                priv->afbcd.ops->exit(priv);
index 534621a13a34d28140541e8d881dafb24f64a78a..3d046878ce6cb74f5a3662e795e091be9d7b4b75 100644 (file)
@@ -718,7 +718,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
        dw_plat_data = &meson_dw_hdmi->dw_plat_data;
 
        ret = devm_regulator_get_enable_optional(dev, "hdmi");
-       if (ret < 0)
+       if (ret < 0 && ret != -ENODEV)
                return ret;
 
        meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
index 154837688ab0d17b98f61d81918b99edb3441621..5df1957c8e41f4e438545f91dd9eecb423e53b91 100644 (file)
@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
                               priv->io_base + _REG(VPP_DOLBY_CTRL));
                writel_relaxed(0x1020080,
                                priv->io_base + _REG(VPP_DUMMY_DATA1));
+               writel_relaxed(0x42020,
+                               priv->io_base + _REG(VPP_DUMMY_DATA));
        } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
                writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
 
index 051bdbc093cf99657b44671a12a317fcd5fefd96..f38296ad87434eca9aead0f47b47d0c4bcc896cf 100644 (file)
@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                bool (*shrink)(struct drm_gem_object *obj);
                bool cond;
                unsigned long freed;
+               unsigned long remaining;
        } stages[] = {
                /* Stages of progressively more aggressive/expensive reclaim: */
                { &priv->lru.dontneed, purge,        true },
@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        };
        long nr = sc->nr_to_scan;
        unsigned long freed = 0;
+       unsigned long remaining = 0;
 
        for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
                if (!stages[i].cond)
                        continue;
                stages[i].freed =
-                       drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+                       drm_gem_lru_scan(stages[i].lru, nr,
+                                       &stages[i].remaining,
+                                        stages[i].shrink);
                nr -= stages[i].freed;
                freed += stages[i].freed;
+               remaining += stages[i].remaining;
        }
 
        if (freed) {
@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                                     stages[3].freed);
        }
 
-       return (freed > 0) ? freed : SHRINK_STOP;
+       return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
                NULL,
        };
        unsigned idx, unmapped = 0;
+       unsigned long remaining = 0;
 
        for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
                unmapped += drm_gem_lru_scan(lrus[idx],
                                             vmap_shrink_limit - unmapped,
+                                            &remaining,
                                             vmap_shrink);
        }
 
index 4e83a1891f3edc493ac3c5b9bbe1b7eab8b5b52b..666a5e53fe1938dabd3dbc244b2601758205f2c4 100644 (file)
@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
        if (pm_runtime_active(pfdev->dev))
                mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
 
-       pm_runtime_put_sync_autosuspend(pfdev->dev);
+       pm_runtime_put_autosuspend(pfdev->dev);
 }
 
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
index cc94efbbf2d4eede4b9f4f95c15303273d056ced..d6c741716167ae8e2f3ee485b49df4886943bcf9 100644 (file)
@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev)
        /* drm_vblank_init calls kcalloc, which can fail */
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (ret)
-               goto cleanup_mode_config;
+               goto unbind_all;
 
        /* Remove early framebuffers (ie. simplefb) */
        ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
        if (ret)
-               goto cleanup_mode_config;
+               goto unbind_all;
 
        sun4i_framebuffer_init(drm);
 
@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev)
 
 finish_poll:
        drm_kms_helper_poll_fini(drm);
+unbind_all:
+       component_unbind_all(dev, NULL);
 cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        of_reserved_mem_device_release(dev);
index 326a3d13a82956846a428ac2e395bab48297a486..c286c6ffe07f691af62cd8f77cf6cdffdce43b71 100644 (file)
@@ -295,8 +295,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
        if (unlock_resv)
                dma_resv_unlock(bo->base.resv);
 
-       ttm_bo_put(bo);
-
        return 0;
 }
 
index c7a1862f322a1cfadb039d58f52d2fcd30e07dd1..ae2f19dc9f81696894f010a732d5e78fad950276 100644 (file)
@@ -158,7 +158,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
                        struct ttm_buffer_object *bo = res->bo;
                        uint32_t num_pages;
 
-                       if (!bo)
+                       if (!bo || bo->resource != res)
                                continue;
 
                        num_pages = PFN_UP(bo->base.size);
index a04a9b20896dcab4c9baebf509be50a9422f3ba2..1778a2081fd6f72b88a79ffdcd2df879a383a885 100644 (file)
@@ -604,7 +604,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
        if (virtio_gpu_is_shmem(bo) && use_dma_api)
-               dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            bo->base.sgt, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1026,7 +1026,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
        if (virtio_gpu_is_shmem(bo) && use_dma_api)
-               dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+               dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            bo->base.sgt, DMA_TO_DEVICE);
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
index 4872d183d86044ed7243e858e93c72deb203048e..aae2efeef503213ccc6695735b6bebf7d4511c16 100644 (file)
@@ -487,7 +487,6 @@ static int host1x_get_resets(struct host1x *host)
 static int host1x_probe(struct platform_device *pdev)
 {
        struct host1x *host;
-       int syncpt_irq;
        int err;
 
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
@@ -517,8 +516,8 @@ static int host1x_probe(struct platform_device *pdev)
        }
 
        host->syncpt_irq = platform_get_irq(pdev, 0);
-       if (syncpt_irq < 0)
-               return syncpt_irq;
+       if (host->syncpt_irq < 0)
+               return host->syncpt_irq;
 
        mutex_init(&host->devices_lock);
        INIT_LIST_HEAD(&host->devices);
index 51b3d16c32233d78a524a9afca79af57dffce362..6e4c92b500b8e383fd99fcc3aca91027e6cfb355 100644 (file)
@@ -488,10 +488,10 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
                val = (temp - val) / 1000;
 
                if (sattr->index != 1) {
-                       data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+                       data->temp[HYSTERSIS][sattr->index] &= 0x0F;
                        data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
                } else {
-                       data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+                       data->temp[HYSTERSIS][sattr->index] &= 0xF0;
                        data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
                }
 
@@ -556,11 +556,11 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr,
                val = data->enh_acoustics[0] & 0xf;
                break;
        case 1:
-               val = (data->enh_acoustics[1] >> 4) & 0xf;
+               val = data->enh_acoustics[1] & 0xf;
                break;
        case 2:
        default:
-               val = data->enh_acoustics[1] & 0xf;
+               val = (data->enh_acoustics[1] >> 4) & 0xf;
                break;
        }
 
index 33edb5c02f7d79d4e8ff7a75fe7ae03c84a83579..d193ed3cb35e5b315f104b04e272862922028e17 100644 (file)
@@ -757,6 +757,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
        struct hwmon_device *hwdev;
        const char *label;
        struct device *hdev;
+       struct device *tdev = dev;
        int i, err, id;
 
        /* Complain about invalid characters in hwmon name attribute */
@@ -826,7 +827,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
        hwdev->name = name;
        hdev->class = &hwmon_class;
        hdev->parent = dev;
-       hdev->of_node = dev ? dev->of_node : NULL;
+       while (tdev && !tdev->of_node)
+               tdev = tdev->parent;
+       hdev->of_node = tdev ? tdev->of_node : NULL;
        hwdev->chip = chip;
        dev_set_drvdata(hdev, drvdata);
        dev_set_name(hdev, HWMON_ID_FORMAT, id);
@@ -838,7 +841,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
 
        INIT_LIST_HEAD(&hwdev->tzdata);
 
-       if (dev && dev->of_node && chip && chip->ops->read &&
+       if (hdev->of_node && chip && chip->ops->read &&
            chip->info[0]->type == hwmon_chip &&
            (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
                err = hwmon_thermal_register_sensors(hdev);
index e06186986444ee4853248deb4ec8424b616ed726..f3a4c5633b1ea6876336719a06ede4cad9207b30 100644 (file)
@@ -772,7 +772,7 @@ static int ina3221_probe_child_from_dt(struct device *dev,
                return ret;
        } else if (val > INA3221_CHANNEL3) {
                dev_err(dev, "invalid reg %d of %pOFn\n", val, child);
-               return ret;
+               return -EINVAL;
        }
 
        input = &ina->inputs[val];
index 66f7ceaa7c3f5fa6328c905f5ca64b8504f25b3c..e9614eb557d4eb44e16397419d988ef9a9dac1ad 100644 (file)
@@ -515,6 +515,8 @@ static const struct it87_devices it87_devices[] = {
 #define has_six_temp(data)     ((data)->features & FEAT_SIX_TEMP)
 #define has_vin3_5v(data)      ((data)->features & FEAT_VIN3_5V)
 #define has_conf_noexit(data)  ((data)->features & FEAT_CONF_NOEXIT)
+#define has_scaling(data)      ((data)->features & (FEAT_12MV_ADC | \
+                                                    FEAT_10_9MV_ADC))
 
 struct it87_sio_data {
        int sioaddr;
@@ -3134,7 +3136,7 @@ static int it87_probe(struct platform_device *pdev)
                         "Detected broken BIOS defaults, disabling PWM interface\n");
 
        /* Starting with IT8721F, we handle scaling of internal voltages */
-       if (has_12mv_adc(data)) {
+       if (has_scaling(data)) {
                if (sio_data->internal & BIT(0))
                        data->in_scaled |= BIT(3);      /* in3 is AVCC */
                if (sio_data->internal & BIT(1))
index 88514152d9306f98460fd8405ec48ad60c4fa025..69341de397cb93e4c0b741620712b55c42fd2acc 100644 (file)
@@ -323,6 +323,7 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
        st->gc.label = name;
        st->gc.parent = &st->client->dev;
        st->gc.owner = THIS_MODULE;
+       st->gc.can_sleep = true;
        st->gc.base = -1;
        st->gc.names = st->gpio_names;
        st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
index 30850a479f61fdf96b5ae1cdd985b09802575bdc..87d56f0fc888c23fffac13ac67322639a4bbf409 100644 (file)
@@ -537,6 +537,12 @@ static const struct cpu_info cpu_hsx = {
        .thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
 };
 
+static const struct cpu_info cpu_skx = {
+       .reg            = &resolved_cores_reg_hsx,
+       .min_peci_revision = 0x33,
+       .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
+};
+
 static const struct cpu_info cpu_icx = {
        .reg            = &resolved_cores_reg_icx,
        .min_peci_revision = 0x40,
@@ -558,7 +564,7 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = {
        },
        {
                .name = "peci_cpu.cputemp.skx",
-               .driver_data = (kernel_ulong_t)&cpu_hsx,
+               .driver_data = (kernel_ulong_t)&cpu_skx,
        },
        {
                .name = "peci_cpu.cputemp.icx",
index ec5f932fc6f0fb6d489418f25beaa1816710759e..1ac2b2f4c5705f5a142579e3085c703675d5b01b 100644 (file)
@@ -301,6 +301,7 @@ static int adm1266_config_gpio(struct adm1266_data *data)
        data->gc.label = name;
        data->gc.parent = &data->client->dev;
        data->gc.owner = THIS_MODULE;
+       data->gc.can_sleep = true;
        data->gc.base = -1;
        data->gc.names = data->gpio_names;
        data->gc.ngpio = ARRAY_SIZE(data->gpio_names);
index 75fc770c9e4035b2b982b34b543b5e51f6dd3378..3daaf22378322f6c2d0074bcad0f96220e227003 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
@@ -16,6 +17,7 @@
 #include <linux/i2c.h>
 #include <linux/pmbus.h>
 #include <linux/gpio/driver.h>
+#include <linux/timekeeping.h>
 #include "pmbus.h"
 
 enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
@@ -65,6 +67,7 @@ struct ucd9000_data {
        struct gpio_chip gpio;
 #endif
        struct dentry *debugfs;
+       ktime_t write_time;
 };
 #define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info)
 
@@ -73,6 +76,73 @@ struct ucd9000_debugfs_entry {
        u8 index;
 };
 
+/*
+ * It has been observed that the UCD90320 randomly fails register access when
+ * doing another access right on the back of a register write. To mitigate this
+ * make sure that there is a minimum delay between a write access and the
+ * following access. The 250us is based on experimental data. At a delay of
+ * 200us the issue seems to go away. Add a bit of extra margin to allow for
+ * system to system differences.
+ */
+#define UCD90320_WAIT_DELAY_US 250
+
+static inline void ucd90320_wait(const struct ucd9000_data *data)
+{
+       s64 delta = ktime_us_delta(ktime_get(), data->write_time);
+
+       if (delta < UCD90320_WAIT_DELAY_US)
+               udelay(UCD90320_WAIT_DELAY_US - delta);
+}
+
+static int ucd90320_read_word_data(struct i2c_client *client, int page,
+                                  int phase, int reg)
+{
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       struct ucd9000_data *data = to_ucd9000_data(info);
+
+       if (reg >= PMBUS_VIRT_BASE)
+               return -ENXIO;
+
+       ucd90320_wait(data);
+       return pmbus_read_word_data(client, page, phase, reg);
+}
+
+static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       struct ucd9000_data *data = to_ucd9000_data(info);
+
+       ucd90320_wait(data);
+       return pmbus_read_byte_data(client, page, reg);
+}
+
+static int ucd90320_write_word_data(struct i2c_client *client, int page,
+                                   int reg, u16 word)
+{
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       struct ucd9000_data *data = to_ucd9000_data(info);
+       int ret;
+
+       ucd90320_wait(data);
+       ret = pmbus_write_word_data(client, page, reg, word);
+       data->write_time = ktime_get();
+
+       return ret;
+}
+
+static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value)
+{
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       struct ucd9000_data *data = to_ucd9000_data(info);
+       int ret;
+
+       ucd90320_wait(data);
+       ret = pmbus_write_byte(client, page, value);
+       data->write_time = ktime_get();
+
+       return ret;
+}
+
 static int ucd9000_get_fan_config(struct i2c_client *client, int fan)
 {
        int fan_config = 0;
@@ -598,6 +668,11 @@ static int ucd9000_probe(struct i2c_client *client)
                info->read_byte_data = ucd9000_read_byte_data;
                info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12
                  | PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34;
+       } else if (mid->driver_data == ucd90320) {
+               info->read_byte_data = ucd90320_read_byte_data;
+               info->read_word_data = ucd90320_read_word_data;
+               info->write_byte = ucd90320_write_byte;
+               info->write_word_data = ucd90320_write_word_data;
        }
 
        ucd9000_probe_gpio(client, mid, data);
index 47bbe47e062fd7a20e27f843b625f07e005722ee..7d5f7441aceb1040f8b8bcfedb8695c162c0acf1 100644 (file)
@@ -758,7 +758,7 @@ static int tmp51x_probe(struct i2c_client *client)
 static struct i2c_driver tmp51x_driver = {
        .driver = {
                .name   = "tmp51x",
-               .of_match_table = of_match_ptr(tmp51x_of_match),
+               .of_match_table = tmp51x_of_match,
        },
        .probe_new      = tmp51x_probe,
        .id_table       = tmp51x_id,
index 5cde837bfd094c57a3cf719b0fe0e4c2645dce8e..78d9f52e2a7194f9e0bc8714394ea9311a006379 100644 (file)
@@ -698,14 +698,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
                ctx->comm_base_addr = pcc_chan->shmem_base_addr;
                if (ctx->comm_base_addr) {
                        if (version == XGENE_HWMON_V2)
-                               ctx->pcc_comm_addr = (void __force *)ioremap(
-                                                       ctx->comm_base_addr,
-                                                       pcc_chan->shmem_size);
+                               ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
+                                                                 ctx->comm_base_addr,
+                                                                 pcc_chan->shmem_size);
                        else
-                               ctx->pcc_comm_addr = memremap(
-                                                       ctx->comm_base_addr,
-                                                       pcc_chan->shmem_size,
-                                                       MEMREMAP_WB);
+                               ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
+                                                                  ctx->comm_base_addr,
+                                                                  pcc_chan->shmem_size,
+                                                                  MEMREMAP_WB);
                } else {
                        dev_err(&pdev->dev, "Failed to get PCC comm region\n");
                        rc = -ENODEV;
@@ -761,6 +761,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
 {
        struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
 
+       cancel_work_sync(&ctx->workq);
        hwmon_device_unregister(ctx->hwmon_dev);
        kfifo_free(&ctx->async_msg_fifo);
        if (acpi_disabled)
index 8c6c7075c765c502e194d222934b294ba966de89..e067671b3ce2eebed4eab2cb51301aaaa3890529 100644 (file)
@@ -316,6 +316,13 @@ static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
                    max_write == 0)
                        break;
        }
+
+       /*
+        * Disable the TX_EMPTY interrupt after finishing all the messages to
+        * avoid overwhelming the CPU.
+        */
+       if (ctlr->msg_tx_idx == ctlr->msg_num)
+               hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY);
 }
 
 static irqreturn_t hisi_i2c_irq(int irq, void *context)
@@ -341,7 +348,11 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
                hisi_i2c_read_rx_fifo(ctlr);
 
 out:
-       if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
+       /*
+        * Only use TRANS_CPLT to indicate the completion. On error cases we'll
+        * get two interrupts, INT_ERR first then TRANS_CPLT.
+        */
+       if (int_stat & HISI_I2C_INT_TRANS_CPLT) {
                hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
                hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
                complete(ctlr->completion);
index 188f2a36d2fd61bb3ffb943f8d879e8bba6bbe52..a49b14d52a98614bd5745baba5048ec24c43438a 100644 (file)
@@ -463,6 +463,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
                if (num == 1 && msgs[0].len == 0)
                        goto stop;
 
+               lpi2c_imx->rx_buf = NULL;
+               lpi2c_imx->tx_buf = NULL;
                lpi2c_imx->delivered = 0;
                lpi2c_imx->msglen = msgs[i].len;
                init_completion(&lpi2c_imx->complete);
@@ -503,10 +505,14 @@ disable:
 static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
 {
        struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+       unsigned int enabled;
        unsigned int temp;
 
+       enabled = readl(lpi2c_imx->base + LPI2C_MIER);
+
        lpi2c_imx_intctrl(lpi2c_imx, 0);
        temp = readl(lpi2c_imx->base + LPI2C_MSR);
+       temp &= enabled;
 
        if (temp & MSR_RDF)
                lpi2c_imx_read_rxfifo(lpi2c_imx);
index d113bed7954526e1cb9afff96b8df98be26d1f15..e0f3b3545cfe4984dad9c582c419737ce448b506 100644 (file)
@@ -171,7 +171,7 @@ static void mxs_i2c_dma_irq_callback(void *param)
 }
 
 static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
-                       struct i2c_msg *msg, uint32_t flags)
+                       struct i2c_msg *msg, u8 *buf, uint32_t flags)
 {
        struct dma_async_tx_descriptor *desc;
        struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
@@ -226,7 +226,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
                }
 
                /* Queue the DMA data transfer. */
-               sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
+               sg_init_one(&i2c->sg_io[1], buf, msg->len);
                dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
                desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
                                        DMA_DEV_TO_MEM,
@@ -259,7 +259,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
                /* Queue the DMA data transfer. */
                sg_init_table(i2c->sg_io, 2);
                sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
-               sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
+               sg_set_buf(&i2c->sg_io[1], buf, msg->len);
                dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
                desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
                                        DMA_MEM_TO_DEV,
@@ -563,6 +563,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
        struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
        int ret;
        int flags;
+       u8 *dma_buf;
        int use_pio = 0;
        unsigned long time_left;
 
@@ -588,13 +589,20 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                if (ret && (ret != -ENXIO))
                        mxs_i2c_reset(i2c);
        } else {
+               dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
+               if (!dma_buf)
+                       return -ENOMEM;
+
                reinit_completion(&i2c->cmd_complete);
-               ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
-               if (ret)
+               ret = mxs_i2c_dma_setup_xfer(adap, msg, dma_buf, flags);
+               if (ret) {
+                       i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
                        return ret;
+               }
 
                time_left = wait_for_completion_timeout(&i2c->cmd_complete,
                                                msecs_to_jiffies(1000));
+               i2c_put_dma_safe_msg_buf(dma_buf, msg, true);
                if (!time_left)
                        goto timeout;
 
index 63259b3ea5abd7aa43de6b22d0ec7861a47e7489..3538d36368a90b5fcf5f8e18e01826908c2198ec 100644 (file)
@@ -308,6 +308,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
        u32 msg[3];
        int rc;
 
+       if (writelen > I2C_SMBUS_BLOCK_MAX)
+               return -EINVAL;
+
        memcpy(ctx->dma_buffer, data, writelen);
        paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
                               DMA_TO_DEVICE);
index 0f392f59b1353142188dcc12774b9c53dcb181f0..7a24c1444ace3c50e4727dd757c123c74fc05b17 100644 (file)
@@ -850,6 +850,10 @@ void icc_node_destroy(int id)
 
        mutex_unlock(&icc_lock);
 
+       if (!node)
+               return;
+
+       kfree(node->links);
        kfree(node);
 }
 EXPORT_SYMBOL_GPL(icc_node_destroy);
@@ -1029,54 +1033,68 @@ int icc_nodes_remove(struct icc_provider *provider)
 EXPORT_SYMBOL_GPL(icc_nodes_remove);
 
 /**
- * icc_provider_add() - add a new interconnect provider
- * @provider: the interconnect provider that will be added into topology
+ * icc_provider_init() - initialize a new interconnect provider
+ * @provider: the interconnect provider to initialize
+ *
+ * Must be called before adding nodes to the provider.
+ */
+void icc_provider_init(struct icc_provider *provider)
+{
+       WARN_ON(!provider->set);
+
+       INIT_LIST_HEAD(&provider->nodes);
+}
+EXPORT_SYMBOL_GPL(icc_provider_init);
+
+/**
+ * icc_provider_register() - register a new interconnect provider
+ * @provider: the interconnect provider to register
  *
  * Return: 0 on success, or an error code otherwise
  */
-int icc_provider_add(struct icc_provider *provider)
+int icc_provider_register(struct icc_provider *provider)
 {
-       if (WARN_ON(!provider->set))
-               return -EINVAL;
        if (WARN_ON(!provider->xlate && !provider->xlate_extended))
                return -EINVAL;
 
        mutex_lock(&icc_lock);
-
-       INIT_LIST_HEAD(&provider->nodes);
        list_add_tail(&provider->provider_list, &icc_providers);
-
        mutex_unlock(&icc_lock);
 
-       dev_dbg(provider->dev, "interconnect provider added to topology\n");
+       dev_dbg(provider->dev, "interconnect provider registered\n");
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(icc_provider_add);
+EXPORT_SYMBOL_GPL(icc_provider_register);
 
 /**
- * icc_provider_del() - delete previously added interconnect provider
- * @provider: the interconnect provider that will be removed from topology
+ * icc_provider_deregister() - deregister an interconnect provider
+ * @provider: the interconnect provider to deregister
  */
-void icc_provider_del(struct icc_provider *provider)
+void icc_provider_deregister(struct icc_provider *provider)
 {
        mutex_lock(&icc_lock);
-       if (provider->users) {
-               pr_warn("interconnect provider still has %d users\n",
-                       provider->users);
-               mutex_unlock(&icc_lock);
-               return;
-       }
-
-       if (!list_empty(&provider->nodes)) {
-               pr_warn("interconnect provider still has nodes\n");
-               mutex_unlock(&icc_lock);
-               return;
-       }
+       WARN_ON(provider->users);
 
        list_del(&provider->provider_list);
        mutex_unlock(&icc_lock);
 }
+EXPORT_SYMBOL_GPL(icc_provider_deregister);
+
+int icc_provider_add(struct icc_provider *provider)
+{
+       icc_provider_init(provider);
+
+       return icc_provider_register(provider);
+}
+EXPORT_SYMBOL_GPL(icc_provider_add);
+
+void icc_provider_del(struct icc_provider *provider)
+{
+       WARN_ON(!list_empty(&provider->nodes));
+
+       icc_provider_deregister(provider);
+}
 EXPORT_SYMBOL_GPL(icc_provider_del);
 
 static const struct of_device_id __maybe_unused ignore_list[] = {
index 823d9be9771a1c70a3d3db7d3b844a68c868854c..979ed610f704b576d346547deba78a2ea2b2a4d5 100644 (file)
@@ -295,6 +295,9 @@ int imx_icc_register(struct platform_device *pdev,
        provider->xlate = of_icc_xlate_onecell;
        provider->data = data;
        provider->dev = dev->parent;
+
+       icc_provider_init(provider);
+
        platform_set_drvdata(pdev, imx_provider);
 
        if (settings) {
@@ -306,20 +309,18 @@ int imx_icc_register(struct platform_device *pdev,
                }
        }
 
-       ret = icc_provider_add(provider);
-       if (ret) {
-               dev_err(dev, "error adding interconnect provider: %d\n", ret);
+       ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
+       if (ret)
                return ret;
-       }
 
-       ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
+       ret = icc_provider_register(provider);
        if (ret)
-               goto provider_del;
+               goto err_unregister_nodes;
 
        return 0;
 
-provider_del:
-       icc_provider_del(provider);
+err_unregister_nodes:
+       imx_icc_unregister_nodes(&imx_provider->provider);
        return ret;
 }
 EXPORT_SYMBOL_GPL(imx_icc_register);
@@ -328,9 +329,8 @@ void imx_icc_unregister(struct platform_device *pdev)
 {
        struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
 
+       icc_provider_deregister(&imx_provider->provider);
        imx_icc_unregister_nodes(&imx_provider->provider);
-
-       icc_provider_del(&imx_provider->provider);
 }
 EXPORT_SYMBOL_GPL(imx_icc_unregister);
 
index df3196f7253687248bbd00fe2099e6d531171f38..4180a06681b2b9aca3134ee1aeb57e2c0ca6df20 100644 (file)
@@ -503,7 +503,6 @@ regmap_done:
        }
 
        provider = &qp->provider;
-       INIT_LIST_HEAD(&provider->nodes);
        provider->dev = dev;
        provider->set = qcom_icc_set;
        provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
@@ -511,12 +510,7 @@ regmap_done:
        provider->xlate_extended = qcom_icc_xlate_extended;
        provider->data = data;
 
-       ret = icc_provider_add(provider);
-       if (ret) {
-               dev_err(dev, "error adding interconnect provider: %d\n", ret);
-               clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-               return ret;
-       }
+       icc_provider_init(provider);
 
        for (i = 0; i < num_nodes; i++) {
                size_t j;
@@ -524,7 +518,7 @@ regmap_done:
                node = icc_node_create(qnodes[i]->id);
                if (IS_ERR(node)) {
                        ret = PTR_ERR(node);
-                       goto err;
+                       goto err_remove_nodes;
                }
 
                node->name = qnodes[i]->name;
@@ -538,17 +532,26 @@ regmap_done:
        }
        data->num_nodes = num_nodes;
 
+       ret = icc_provider_register(provider);
+       if (ret)
+               goto err_remove_nodes;
+
        platform_set_drvdata(pdev, qp);
 
        /* Populate child NoC devices if any */
-       if (of_get_child_count(dev->of_node) > 0)
-               return of_platform_populate(dev->of_node, NULL, NULL, dev);
+       if (of_get_child_count(dev->of_node) > 0) {
+               ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+               if (ret)
+                       goto err_deregister_provider;
+       }
 
        return 0;
-err:
+
+err_deregister_provider:
+       icc_provider_deregister(provider);
+err_remove_nodes:
        icc_nodes_remove(provider);
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-       icc_provider_del(provider);
 
        return ret;
 }
@@ -558,9 +561,9 @@ int qnoc_remove(struct platform_device *pdev)
 {
        struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
 
+       icc_provider_deregister(&qp->provider);
        icc_nodes_remove(&qp->provider);
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-       icc_provider_del(&qp->provider);
 
        return 0;
 }
index fd17291c61eb96b5b9480669c1f1f090b41c2825..fdb5e58e408b4da8d42f614fad20805cc9c09ca0 100644 (file)
@@ -192,9 +192,10 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
        provider->pre_aggregate = qcom_icc_pre_aggregate;
        provider->aggregate = qcom_icc_aggregate;
        provider->xlate_extended = qcom_icc_xlate_extended;
-       INIT_LIST_HEAD(&provider->nodes);
        provider->data = data;
 
+       icc_provider_init(provider);
+
        qp->dev = dev;
        qp->bcms = desc->bcms;
        qp->num_bcms = desc->num_bcms;
@@ -203,10 +204,6 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
        if (IS_ERR(qp->voter))
                return PTR_ERR(qp->voter);
 
-       ret = icc_provider_add(provider);
-       if (ret)
-               return ret;
-
        for (i = 0; i < qp->num_bcms; i++)
                qcom_icc_bcm_init(qp->bcms[i], dev);
 
@@ -218,7 +215,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
                node = icc_node_create(qn->id);
                if (IS_ERR(node)) {
                        ret = PTR_ERR(node);
-                       goto err;
+                       goto err_remove_nodes;
                }
 
                node->name = qn->name;
@@ -232,16 +229,27 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
        }
 
        data->num_nodes = num_nodes;
+
+       ret = icc_provider_register(provider);
+       if (ret)
+               goto err_remove_nodes;
+
        platform_set_drvdata(pdev, qp);
 
        /* Populate child NoC devices if any */
-       if (of_get_child_count(dev->of_node) > 0)
-               return of_platform_populate(dev->of_node, NULL, NULL, dev);
+       if (of_get_child_count(dev->of_node) > 0) {
+               ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+               if (ret)
+                       goto err_deregister_provider;
+       }
 
        return 0;
-err:
+
+err_deregister_provider:
+       icc_provider_deregister(provider);
+err_remove_nodes:
        icc_nodes_remove(provider);
-       icc_provider_del(provider);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe);
@@ -250,8 +258,8 @@ int qcom_icc_rpmh_remove(struct platform_device *pdev)
 {
        struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
 
+       icc_provider_deregister(&qp->provider);
        icc_nodes_remove(&qp->provider);
-       icc_provider_del(&qp->provider);
 
        return 0;
 }
index 5ea192f1141dc42f4cee48fb6c28cc51bcd65430..1828deaca44326249d0e29da7c5cf9456a5b2315 100644 (file)
@@ -692,7 +692,6 @@ static int msm8974_icc_probe(struct platform_device *pdev)
                return ret;
 
        provider = &qp->provider;
-       INIT_LIST_HEAD(&provider->nodes);
        provider->dev = dev;
        provider->set = msm8974_icc_set;
        provider->aggregate = icc_std_aggregate;
@@ -700,11 +699,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
        provider->data = data;
        provider->get_bw = msm8974_get_bw;
 
-       ret = icc_provider_add(provider);
-       if (ret) {
-               dev_err(dev, "error adding interconnect provider: %d\n", ret);
-               goto err_disable_clks;
-       }
+       icc_provider_init(provider);
 
        for (i = 0; i < num_nodes; i++) {
                size_t j;
@@ -712,7 +707,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
                node = icc_node_create(qnodes[i]->id);
                if (IS_ERR(node)) {
                        ret = PTR_ERR(node);
-                       goto err_del_icc;
+                       goto err_remove_nodes;
                }
 
                node->name = qnodes[i]->name;
@@ -729,15 +724,16 @@ static int msm8974_icc_probe(struct platform_device *pdev)
        }
        data->num_nodes = num_nodes;
 
+       ret = icc_provider_register(provider);
+       if (ret)
+               goto err_remove_nodes;
+
        platform_set_drvdata(pdev, qp);
 
        return 0;
 
-err_del_icc:
+err_remove_nodes:
        icc_nodes_remove(provider);
-       icc_provider_del(provider);
-
-err_disable_clks:
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
 
        return ret;
@@ -747,9 +743,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
 {
        struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
 
+       icc_provider_deregister(&qp->provider);
        icc_nodes_remove(&qp->provider);
        clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-       icc_provider_del(&qp->provider);
 
        return 0;
 }
index 5fa1710874258926ac8c31a7b8b70e0880a538d8..1bafb54f1432912a6fdbc4846ca5549390de031c 100644 (file)
@@ -158,8 +158,8 @@ static int qcom_osm_l3_remove(struct platform_device *pdev)
 {
        struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
 
+       icc_provider_deregister(&qp->provider);
        icc_nodes_remove(&qp->provider);
-       icc_provider_del(&qp->provider);
 
        return 0;
 }
@@ -236,7 +236,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
        qnodes = desc->nodes;
        num_nodes = desc->num_nodes;
 
-       data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+       data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -245,14 +245,9 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
        provider->set = qcom_osm_l3_set;
        provider->aggregate = icc_std_aggregate;
        provider->xlate = of_icc_xlate_onecell;
-       INIT_LIST_HEAD(&provider->nodes);
        provider->data = data;
 
-       ret = icc_provider_add(provider);
-       if (ret) {
-               dev_err(&pdev->dev, "error adding interconnect provider\n");
-               return ret;
-       }
+       icc_provider_init(provider);
 
        for (i = 0; i < num_nodes; i++) {
                size_t j;
@@ -275,12 +270,15 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
        }
        data->num_nodes = num_nodes;
 
+       ret = icc_provider_register(provider);
+       if (ret)
+               goto err;
+
        platform_set_drvdata(pdev, qp);
 
        return 0;
 err:
        icc_nodes_remove(provider);
-       icc_provider_del(provider);
 
        return ret;
 }
index 0da612d6398c54874f6b97f674e50edabc5247a5..a29cdb4fac03faec3c376bd68e0a7a5c86c0404a 100644 (file)
@@ -147,9 +147,9 @@ static struct qcom_icc_node mas_snoc_bimc_nrt = {
        .name = "mas_snoc_bimc_nrt",
        .buswidth = 16,
        .qos.ap_owned = true,
-       .qos.qos_port = 2,
+       .qos.qos_port = 3,
        .qos.qos_mode = NOC_QOS_MODE_BYPASS,
-       .mas_rpm_id = 163,
+       .mas_rpm_id = 164,
        .slv_rpm_id = -1,
        .num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
        .links = mas_snoc_bimc_nrt_links,
index e3a12e3d6e0619275670d3d41a69680f8c61d667..2d7a8e7b85ec29e94684ee7cc172be46109f89ec 100644 (file)
@@ -1844,100 +1844,6 @@ static const struct qcom_icc_desc sm8450_system_noc = {
        .num_bcms = ARRAY_SIZE(system_noc_bcms),
 };
 
-static int qnoc_probe(struct platform_device *pdev)
-{
-       const struct qcom_icc_desc *desc;
-       struct icc_onecell_data *data;
-       struct icc_provider *provider;
-       struct qcom_icc_node * const *qnodes;
-       struct qcom_icc_provider *qp;
-       struct icc_node *node;
-       size_t num_nodes, i;
-       int ret;
-
-       desc = device_get_match_data(&pdev->dev);
-       if (!desc)
-               return -EINVAL;
-
-       qnodes = desc->nodes;
-       num_nodes = desc->num_nodes;
-
-       qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
-       if (!qp)
-               return -ENOMEM;
-
-       data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       provider = &qp->provider;
-       provider->dev = &pdev->dev;
-       provider->set = qcom_icc_set;
-       provider->pre_aggregate = qcom_icc_pre_aggregate;
-       provider->aggregate = qcom_icc_aggregate;
-       provider->xlate_extended = qcom_icc_xlate_extended;
-       INIT_LIST_HEAD(&provider->nodes);
-       provider->data = data;
-
-       qp->dev = &pdev->dev;
-       qp->bcms = desc->bcms;
-       qp->num_bcms = desc->num_bcms;
-
-       qp->voter = of_bcm_voter_get(qp->dev, NULL);
-       if (IS_ERR(qp->voter))
-               return PTR_ERR(qp->voter);
-
-       ret = icc_provider_add(provider);
-       if (ret) {
-               dev_err(&pdev->dev, "error adding interconnect provider\n");
-               return ret;
-       }
-
-       for (i = 0; i < qp->num_bcms; i++)
-               qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
-       for (i = 0; i < num_nodes; i++) {
-               size_t j;
-
-               if (!qnodes[i])
-                       continue;
-
-               node = icc_node_create(qnodes[i]->id);
-               if (IS_ERR(node)) {
-                       ret = PTR_ERR(node);
-                       goto err;
-               }
-
-               node->name = qnodes[i]->name;
-               node->data = qnodes[i];
-               icc_node_add(node, provider);
-
-               for (j = 0; j < qnodes[i]->num_links; j++)
-                       icc_link_create(node, qnodes[i]->links[j]);
-
-               data->nodes[i] = node;
-       }
-       data->num_nodes = num_nodes;
-
-       platform_set_drvdata(pdev, qp);
-
-       return 0;
-err:
-       icc_nodes_remove(provider);
-       icc_provider_del(provider);
-       return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
-       struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
-       icc_nodes_remove(&qp->provider);
-       icc_provider_del(&qp->provider);
-
-       return 0;
-}
-
 static const struct of_device_id qnoc_of_match[] = {
        { .compatible = "qcom,sm8450-aggre1-noc",
          .data = &sm8450_aggre1_noc},
@@ -1966,8 +1872,8 @@ static const struct of_device_id qnoc_of_match[] = {
 MODULE_DEVICE_TABLE(of, qnoc_of_match);
 
 static struct platform_driver qnoc_driver = {
-       .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .probe = qcom_icc_rpmh_probe,
+       .remove = qcom_icc_rpmh_remove,
        .driver = {
                .name = "qnoc-sm8450",
                .of_match_table = qnoc_of_match,
index 54fa027ab961f7c86a1208538f0d1c9c7d052338..d823ba988ef68c75a976de744c0e04750364bc2b 100644 (file)
@@ -2165,101 +2165,6 @@ static const struct qcom_icc_desc sm8550_system_noc = {
        .num_bcms = ARRAY_SIZE(system_noc_bcms),
 };
 
-static int qnoc_probe(struct platform_device *pdev)
-{
-       const struct qcom_icc_desc *desc;
-       struct icc_onecell_data *data;
-       struct icc_provider *provider;
-       struct qcom_icc_node * const *qnodes;
-       struct qcom_icc_provider *qp;
-       struct icc_node *node;
-       size_t num_nodes, i;
-       int ret;
-
-       desc = device_get_match_data(&pdev->dev);
-       if (!desc)
-               return -EINVAL;
-
-       qnodes = desc->nodes;
-       num_nodes = desc->num_nodes;
-
-       qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
-       if (!qp)
-               return -ENOMEM;
-
-       data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       provider = &qp->provider;
-       provider->dev = &pdev->dev;
-       provider->set = qcom_icc_set;
-       provider->pre_aggregate = qcom_icc_pre_aggregate;
-       provider->aggregate = qcom_icc_aggregate;
-       provider->xlate_extended = qcom_icc_xlate_extended;
-       INIT_LIST_HEAD(&provider->nodes);
-       provider->data = data;
-
-       qp->dev = &pdev->dev;
-       qp->bcms = desc->bcms;
-       qp->num_bcms = desc->num_bcms;
-
-       qp->voter = of_bcm_voter_get(qp->dev, NULL);
-       if (IS_ERR(qp->voter))
-               return PTR_ERR(qp->voter);
-
-       ret = icc_provider_add(provider);
-       if (ret) {
-               dev_err_probe(&pdev->dev, ret,
-                             "error adding interconnect provider\n");
-               return ret;
-       }
-
-       for (i = 0; i < qp->num_bcms; i++)
-               qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
-       for (i = 0; i < num_nodes; i++) {
-               size_t j;
-
-               if (!qnodes[i])
-                       continue;
-
-               node = icc_node_create(qnodes[i]->id);
-               if (IS_ERR(node)) {
-                       ret = PTR_ERR(node);
-                       goto err;
-               }
-
-               node->name = qnodes[i]->name;
-               node->data = qnodes[i];
-               icc_node_add(node, provider);
-
-               for (j = 0; j < qnodes[i]->num_links; j++)
-                       icc_link_create(node, qnodes[i]->links[j]);
-
-               data->nodes[i] = node;
-       }
-       data->num_nodes = num_nodes;
-
-       platform_set_drvdata(pdev, qp);
-
-       return 0;
-err:
-       icc_nodes_remove(provider);
-       icc_provider_del(provider);
-       return ret;
-}
-
-static int qnoc_remove(struct platform_device *pdev)
-{
-       struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
-
-       icc_nodes_remove(&qp->provider);
-       icc_provider_del(&qp->provider);
-
-       return 0;
-}
-
 static const struct of_device_id qnoc_of_match[] = {
        { .compatible = "qcom,sm8550-aggre1-noc",
          .data = &sm8550_aggre1_noc},
@@ -2294,8 +2199,8 @@ static const struct of_device_id qnoc_of_match[] = {
 MODULE_DEVICE_TABLE(of, qnoc_of_match);
 
 static struct platform_driver qnoc_driver = {
-       .probe = qnoc_probe,
-       .remove = qnoc_remove,
+       .probe = qcom_icc_rpmh_probe,
+       .remove = qcom_icc_rpmh_remove,
        .driver = {
                .name = "qnoc-sm8550",
                .of_match_table = qnoc_of_match,
index 6559d8cf80687bf8d4034f52ea1f150c33c5f13c..ebf09bbf725bd117195eec5457b5495b12305105 100644 (file)
@@ -96,14 +96,9 @@ static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
 static int exynos_generic_icc_remove(struct platform_device *pdev)
 {
        struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
-       struct icc_node *parent_node, *node = priv->node;
-
-       parent_node = exynos_icc_get_parent(priv->dev->parent->of_node);
-       if (parent_node && !IS_ERR(parent_node))
-               icc_link_destroy(node, parent_node);
 
+       icc_provider_deregister(&priv->provider);
        icc_nodes_remove(&priv->provider);
-       icc_provider_del(&priv->provider);
 
        return 0;
 }
@@ -132,15 +127,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
        provider->inter_set = true;
        provider->data = priv;
 
-       ret = icc_provider_add(provider);
-       if (ret < 0)
-               return ret;
+       icc_provider_init(provider);
 
        icc_node = icc_node_create(pdev->id);
-       if (IS_ERR(icc_node)) {
-               ret = PTR_ERR(icc_node);
-               goto err_prov_del;
-       }
+       if (IS_ERR(icc_node))
+               return PTR_ERR(icc_node);
 
        priv->node = icc_node;
        icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
@@ -149,6 +140,9 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
                                 &priv->bus_clk_ratio))
                priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
 
+       icc_node->data = priv;
+       icc_node_add(icc_node, provider);
+
        /*
         * Register a PM QoS request for the parent (devfreq) device.
         */
@@ -157,9 +151,6 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_node_del;
 
-       icc_node->data = priv;
-       icc_node_add(icc_node, provider);
-
        icc_parent_node = exynos_icc_get_parent(bus_dev->of_node);
        if (IS_ERR(icc_parent_node)) {
                ret = PTR_ERR(icc_parent_node);
@@ -171,14 +162,17 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
                        goto err_pmqos_del;
        }
 
+       ret = icc_provider_register(provider);
+       if (ret < 0)
+               goto err_pmqos_del;
+
        return 0;
 
 err_pmqos_del:
        dev_pm_qos_remove_request(&priv->qos_req);
 err_node_del:
        icc_nodes_remove(provider);
-err_prov_del:
-       icc_provider_del(provider);
+
        return ret;
 }
 
index 5f1e2593fad7ec4feb9667a9c790cbb1b35123de..b0a22e99bade37148914492b26ec75d5523088d7 100644 (file)
@@ -15,6 +15,10 @@ if MD
 config BLK_DEV_MD
        tristate "RAID support"
        select BLOCK_HOLDER_DEPRECATED if SYSFS
+       # BLOCK_LEGACY_AUTOLOAD requirement should be removed
+       # after relevant mdadm enhancements - to make "names=yes"
+       # the default - are widely available.
+       select BLOCK_LEGACY_AUTOLOAD
        help
          This driver lets you combine several hard disk partitions into one
          logical block device. This can be used to simply append one
index 40cb1719ae4d526b2fbe40a93f5f36fe7e25ec28..3ba53dc3cc3f627218941a61a9bdd6d948c87f43 100644 (file)
@@ -72,7 +72,9 @@ struct dm_crypt_io {
        struct crypt_config *cc;
        struct bio *base_bio;
        u8 *integrity_metadata;
-       bool integrity_metadata_from_pool;
+       bool integrity_metadata_from_pool:1;
+       bool in_tasklet:1;
+
        struct work_struct work;
        struct tasklet_struct tasklet;
 
@@ -1730,6 +1732,7 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
        io->ctx.r.req = NULL;
        io->integrity_metadata = NULL;
        io->integrity_metadata_from_pool = false;
+       io->in_tasklet = false;
        atomic_set(&io->io_pending, 0);
 }
 
@@ -1776,14 +1779,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
         * our tasklet. In this case we need to delay bio_endio()
         * execution to after the tasklet is done and dequeued.
         */
-       if (tasklet_trylock(&io->tasklet)) {
-               tasklet_unlock(&io->tasklet);
-               bio_endio(base_bio);
+       if (io->in_tasklet) {
+               INIT_WORK(&io->work, kcryptd_io_bio_endio);
+               queue_work(cc->io_queue, &io->work);
                return;
        }
 
-       INIT_WORK(&io->work, kcryptd_io_bio_endio);
-       queue_work(cc->io_queue, &io->work);
+       bio_endio(base_bio);
 }
 
 /*
@@ -1936,6 +1938,7 @@ pop_from_list:
                        io = crypt_io_from_node(rb_first(&write_tree));
                        rb_erase(&io->rb_node, &write_tree);
                        kcryptd_io_write(io);
+                       cond_resched();
                } while (!RB_EMPTY_ROOT(&write_tree));
                blk_finish_plug(&plug);
        }
@@ -2230,6 +2233,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
                 * it is being executed with irqs disabled.
                 */
                if (in_hardirq() || irqs_disabled()) {
+                       io->in_tasklet = true;
                        tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
                        tasklet_schedule(&io->tasklet);
                        return;
index c21a19ab73f705d791e611101e79b467fe329bf0..db2d997a6c1815ce6f7f72fa0fed51572afae856 100644 (file)
@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
               atomic_read(&shared->in_flight[WRITE]);
 }
 
-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
 {
        int cpu;
        struct dm_stats_last_position *last;
@@ -197,11 +197,16 @@ void dm_stats_init(struct dm_stats *stats)
        INIT_LIST_HEAD(&stats->list);
        stats->precise_timestamps = false;
        stats->last = alloc_percpu(struct dm_stats_last_position);
+       if (!stats->last)
+               return -ENOMEM;
+
        for_each_possible_cpu(cpu) {
                last = per_cpu_ptr(stats->last, cpu);
                last->last_sector = (sector_t)ULLONG_MAX;
                last->last_rw = UINT_MAX;
        }
+
+       return 0;
 }
 
 void dm_stats_cleanup(struct dm_stats *stats)
index 0bc152c8e4f310282845a30d65a0528d029af254..c6728c8b41594bd1a9cac425be013d8a03f085d3 100644 (file)
@@ -21,7 +21,7 @@ struct dm_stats_aux {
        unsigned long long duration_ns;
 };
 
-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
 void dm_stats_cleanup(struct dm_stats *st);
 
 struct mapped_device;
index 6cd105c1cef35cc51defa05a16684b361ef1ac12..13d4677baafd176065a8e218695910205606245f 100644 (file)
@@ -3369,6 +3369,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        pt->low_water_blocks = low_water_blocks;
        pt->adjusted_pf = pt->requested_pf = pf;
        ti->num_flush_bios = 1;
+       ti->limit_swap_bios = true;
 
        /*
         * Only need to enable discards if the pool should pass
@@ -4249,6 +4250,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
 
        ti->num_flush_bios = 1;
+       ti->limit_swap_bios = true;
        ti->flush_supported = true;
        ti->accounts_remapped_io = true;
        ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
index eace45a18d45611e70e1b4a988d674535925a30a..2d0f934ba6e6ad9a6b5e5a553475fe02c1d857f1 100644 (file)
@@ -512,10 +512,10 @@ static void dm_io_acct(struct dm_io *io, bool end)
                sectors = io->sectors;
 
        if (!end)
-               bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
-                                  start_time);
+               bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time);
        else
-               bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
+               bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors,
+                                start_time);
 
        if (static_branch_unlikely(&stats_enabled) &&
            unlikely(dm_stats_used(&md->stats))) {
@@ -2097,7 +2097,9 @@ static struct mapped_device *alloc_dev(int minor)
        if (!md->pending_io)
                goto bad;
 
-       dm_stats_init(&md->stats);
+       r = dm_stats_init(&md->stats);
+       if (r < 0)
+               goto bad;
 
        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
index 927a43db5dfbb98fd0702163e0962201e111fe68..39e49e5d71823ed2692cedab32a1418c23d6e162 100644 (file)
@@ -3128,6 +3128,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
                err = kstrtouint(buf, 10, (unsigned int *)&slot);
                if (err < 0)
                        return err;
+               if (slot < 0)
+                       /* overflow */
+                       return -ENOSPC;
        }
        if (rdev->mddev->pers && slot == -1) {
                /* Setting 'slot' on an active array requires also
@@ -6256,6 +6259,11 @@ static void __md_stop(struct mddev *mddev)
                mddev->to_remove = &md_redundancy_group;
        module_put(pers->owner);
        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+       percpu_ref_exit(&mddev->writes_pending);
+       percpu_ref_exit(&mddev->active_io);
+       bioset_exit(&mddev->bio_set);
+       bioset_exit(&mddev->sync_set);
 }
 
 void md_stop(struct mddev *mddev)
@@ -6265,10 +6273,6 @@ void md_stop(struct mddev *mddev)
         */
        __md_stop_writes(mddev);
        __md_stop(mddev);
-       percpu_ref_exit(&mddev->writes_pending);
-       percpu_ref_exit(&mddev->active_io);
-       bioset_exit(&mddev->bio_set);
-       bioset_exit(&mddev->sync_set);
 }
 
 EXPORT_SYMBOL_GPL(md_stop);
@@ -7839,11 +7843,6 @@ static void md_free_disk(struct gendisk *disk)
 {
        struct mddev *mddev = disk->private_data;
 
-       percpu_ref_exit(&mddev->writes_pending);
-       percpu_ref_exit(&mddev->active_io);
-       bioset_exit(&mddev->bio_set);
-       bioset_exit(&mddev->sync_set);
-
        mddev_free(mddev);
 }
 
index 2b01873ba0db51c9c999a2d578591a85c944f0e1..5c2336f318d9a132d18b815f505da78bc033ab92 100644 (file)
@@ -488,7 +488,7 @@ static enum m5mols_restype __find_restype(u32 code)
        do {
                if (code == m5mols_default_ffmt[type].code)
                        return type;
-       } while (type++ != SIZE_DEFAULT_FFMT);
+       } while (++type != SIZE_DEFAULT_FFMT);
 
        return 0;
 }
index 592907546ee64a7acc48e65980ecb8e695f24bf4..5cd28619ea9fbcc360ddfafbea9753221ef8aefa 100644 (file)
@@ -794,16 +794,12 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
        mc->provider.aggregate = mc->soc->icc_ops->aggregate;
        mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
 
-       err = icc_provider_add(&mc->provider);
-       if (err)
-               return err;
+       icc_provider_init(&mc->provider);
 
        /* create Memory Controller node */
        node = icc_node_create(TEGRA_ICC_MC);
-       if (IS_ERR(node)) {
-               err = PTR_ERR(node);
-               goto del_provider;
-       }
+       if (IS_ERR(node))
+               return PTR_ERR(node);
 
        node->name = "Memory Controller";
        icc_node_add(node, &mc->provider);
@@ -830,12 +826,14 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
                        goto remove_nodes;
        }
 
+       err = icc_provider_register(&mc->provider);
+       if (err)
+               goto remove_nodes;
+
        return 0;
 
 remove_nodes:
        icc_nodes_remove(&mc->provider);
-del_provider:
-       icc_provider_del(&mc->provider);
 
        return err;
 }
index 85bc936c02f9401eb71b11e070757345c9b45553..00ed2b6a0d1b27e0466bd2ab14e82b7efc6c1ae0 100644 (file)
@@ -1351,15 +1351,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
        emc->provider.aggregate = soc->icc_ops->aggregate;
        emc->provider.xlate_extended = emc_of_icc_xlate_extended;
 
-       err = icc_provider_add(&emc->provider);
-       if (err)
-               goto err_msg;
+       icc_provider_init(&emc->provider);
 
        /* create External Memory Controller node */
        node = icc_node_create(TEGRA_ICC_EMC);
        if (IS_ERR(node)) {
                err = PTR_ERR(node);
-               goto del_provider;
+               goto err_msg;
        }
 
        node->name = "External Memory Controller";
@@ -1380,12 +1378,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
        node->name = "External Memory (DRAM)";
        icc_node_add(node, &emc->provider);
 
+       err = icc_provider_register(&emc->provider);
+       if (err)
+               goto remove_nodes;
+
        return 0;
 
 remove_nodes:
        icc_nodes_remove(&emc->provider);
-del_provider:
-       icc_provider_del(&emc->provider);
 err_msg:
        dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
 
index bd4e37b6552de5d5a426ac963f0116352ca6590d..fd595c851a27865bb9044818596062e797ed80f6 100644 (file)
@@ -1021,15 +1021,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
        emc->provider.aggregate = soc->icc_ops->aggregate;
        emc->provider.xlate_extended = emc_of_icc_xlate_extended;
 
-       err = icc_provider_add(&emc->provider);
-       if (err)
-               goto err_msg;
+       icc_provider_init(&emc->provider);
 
        /* create External Memory Controller node */
        node = icc_node_create(TEGRA_ICC_EMC);
        if (IS_ERR(node)) {
                err = PTR_ERR(node);
-               goto del_provider;
+               goto err_msg;
        }
 
        node->name = "External Memory Controller";
@@ -1050,12 +1048,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
        node->name = "External Memory (DRAM)";
        icc_node_add(node, &emc->provider);
 
+       err = icc_provider_register(&emc->provider);
+       if (err)
+               goto remove_nodes;
+
        return 0;
 
 remove_nodes:
        icc_nodes_remove(&emc->provider);
-del_provider:
-       icc_provider_del(&emc->provider);
 err_msg:
        dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
 
index 77706e9bc5433917712a5c52e68f80134b7fd322..c91e9b7e2e019cf4af40fb0e5253f55455e984c1 100644 (file)
@@ -1533,15 +1533,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
        emc->provider.aggregate = soc->icc_ops->aggregate;
        emc->provider.xlate_extended = emc_of_icc_xlate_extended;
 
-       err = icc_provider_add(&emc->provider);
-       if (err)
-               goto err_msg;
+       icc_provider_init(&emc->provider);
 
        /* create External Memory Controller node */
        node = icc_node_create(TEGRA_ICC_EMC);
        if (IS_ERR(node)) {
                err = PTR_ERR(node);
-               goto del_provider;
+               goto err_msg;
        }
 
        node->name = "External Memory Controller";
@@ -1562,12 +1560,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
        node->name = "External Memory (DRAM)";
        icc_node_add(node, &emc->provider);
 
+       err = icc_provider_register(&emc->provider);
+       if (err)
+               goto remove_nodes;
+
        return 0;
 
 remove_nodes:
        icc_nodes_remove(&emc->provider);
-del_provider:
-       icc_provider_del(&emc->provider);
 err_msg:
        dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
 
index 40f5969b07a6678cdaae9734931c6e7f1d775e73..dab1508bf83c67ea8cbcf3d07d6f3a207930735a 100644 (file)
@@ -51,7 +51,7 @@ static int dw_mci_starfive_execute_tuning(struct dw_mci_slot *slot,
        struct dw_mci *host = slot->host;
        struct starfive_priv *priv = host->priv;
        int rise_point = -1, fall_point = -1;
-       int err, prev_err;
+       int err, prev_err = 0;
        int i;
        bool found = 0;
        u32 regval;
index 7ef828942df359c53339b59ae6ecda11e6ed2d04..89953093e20c7efb61c9e444a7750c4a778f498a 100644 (file)
@@ -369,7 +369,7 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
                                        MAX_POWER_ON_TIMEOUT, false, host, val,
                                        reg);
                if (ret)
-                       dev_warn(mmc_dev(host->mmc), "Power on failed\n");
+                       dev_info(mmc_dev(host->mmc), "Power on failed\n");
        }
 }
 
index 00646aa315c307f6708f179b5180a9c24813da59..236e5219c8112ce615d49c61efb1a9e88df7b39f 100644 (file)
@@ -1775,6 +1775,19 @@ void bond_lower_state_changed(struct slave *slave)
                slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);  \
 } while (0)
 
+/* The bonding driver uses ether_setup() to convert a master bond device
+ * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
+ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
+ */
+static void bond_ether_setup(struct net_device *bond_dev)
+{
+       unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
+
+       ether_setup(bond_dev);
+       bond_dev->flags |= IFF_MASTER | slave_flag;
+       bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                 struct netlink_ext_ack *extack)
@@ -1866,10 +1879,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 
                        if (slave_dev->type != ARPHRD_ETHER)
                                bond_setup_by_slave(bond_dev, slave_dev);
-                       else {
-                               ether_setup(bond_dev);
-                               bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-                       }
+                       else
+                               bond_ether_setup(bond_dev);
 
                        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
                                                 bond_dev);
@@ -2289,9 +2300,7 @@ err_undo_flags:
                        eth_hw_addr_random(bond_dev);
                if (bond_dev->type != ARPHRD_ETHER) {
                        dev_close(bond_dev);
-                       ether_setup(bond_dev);
-                       bond_dev->flags |= IFF_MASTER;
-                       bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+                       bond_ether_setup(bond_dev);
                }
        }
 
index 8d916e2ee6c252e22a200376787e4f84351c7363..8dcc32e4e30efac4f655cf9f28d856758d97252a 100644 (file)
@@ -93,20 +93,20 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
        if (priv->can.clock.freq > 8000000)
                priv->cpu_interface |= CPUIF_DMC;
 
-       if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+       if (of_property_read_bool(np, "bosch,divide-memory-clock"))
                priv->cpu_interface |= CPUIF_DMC;
-       if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+       if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
                priv->cpu_interface |= CPUIF_MUX;
 
        if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
                priv->bus_config |= BUSCFG_CBY;
-       if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+       if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
                priv->bus_config |= BUSCFG_DR0;
-       if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+       if (of_property_read_bool(np, "bosch,disconnect-rx1-input"))
                priv->bus_config |= BUSCFG_DR1;
-       if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+       if (of_property_read_bool(np, "bosch,disconnect-tx1-output"))
                priv->bus_config |= BUSCFG_DT1;
-       if (of_get_property(np, "bosch,polarity-dominant", NULL))
+       if (of_property_read_bool(np, "bosch,polarity-dominant"))
                priv->bus_config |= BUSCFG_POL;
 
        prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
index e968322dfbf0b262e23cd55fa482d5eb268fd2dc..70887e0aece33dd20edbf32d79d794bb01cb98b9 100644 (file)
@@ -263,7 +263,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
                if (of_property_read_u32(of_port, "reg", &reg))
                        continue;
 
-               if (reg < B53_CPU_PORT)
+               if (reg < B53_N_PORTS)
                        pdata->enabled_ports |= BIT(reg);
        }
 
index 729b36eeb2c46acacf9312f1af6fd7a68f40455e..7fc2155d93d6ece8d20faf627e097bb80d9e58d8 100644 (file)
@@ -319,7 +319,7 @@ static const u16 ksz8795_regs[] = {
        [S_BROADCAST_CTRL]              = 0x06,
        [S_MULTICAST_CTRL]              = 0x04,
        [P_XMII_CTRL_0]                 = 0x06,
-       [P_XMII_CTRL_1]                 = 0x56,
+       [P_XMII_CTRL_1]                 = 0x06,
 };
 
 static const u32 ksz8795_masks[] = {
index a508402c4ecbf60379097106a47a7dc53bb96ac8..02410ac439b76c18c915562ebddd35aec685364a 100644 (file)
@@ -396,6 +396,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
 /* Set up switch core clock for MT7530 */
 static void mt7530_pll_setup(struct mt7530_priv *priv)
 {
+       /* Disable core clock */
+       core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
        /* Disable PLL */
        core_write(priv, CORE_GSWPLL_GRP1, 0);
 
@@ -409,14 +412,19 @@ static void mt7530_pll_setup(struct mt7530_priv *priv)
                   RG_GSWPLL_EN_PRE |
                   RG_GSWPLL_POSDIV_200M(2) |
                   RG_GSWPLL_FBKDIV_200M(32));
+
+       udelay(20);
+
+       /* Enable core clock */
+       core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 }
 
-/* Setup TX circuit including relevant PAD and driving */
+/* Setup port 6 interface mode and TRGMII TX circuit */
 static int
 mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
 {
        struct mt7530_priv *priv = ds->priv;
-       u32 ncpo1, ssc_delta, trgint, i, xtal;
+       u32 ncpo1, ssc_delta, trgint, xtal;
 
        xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
 
@@ -430,11 +438,13 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
        switch (interface) {
        case PHY_INTERFACE_MODE_RGMII:
                trgint = 0;
-               /* PLL frequency: 125MHz */
-               ncpo1 = 0x0c80;
                break;
        case PHY_INTERFACE_MODE_TRGMII:
                trgint = 1;
+               if (xtal == HWTRAP_XTAL_25MHZ)
+                       ssc_delta = 0x57;
+               else
+                       ssc_delta = 0x87;
                if (priv->id == ID_MT7621) {
                        /* PLL frequency: 150MHz: 1.2GBit */
                        if (xtal == HWTRAP_XTAL_40MHZ)
@@ -454,46 +464,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
                return -EINVAL;
        }
 
-       if (xtal == HWTRAP_XTAL_25MHZ)
-               ssc_delta = 0x57;
-       else
-               ssc_delta = 0x87;
-
        mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
                   P6_INTF_MODE(trgint));
 
-       /* Lower Tx Driving for TRGMII path */
-       for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
-               mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
-                            TD_DM_DRVP(8) | TD_DM_DRVN(8));
+       if (trgint) {
+               /* Disable the MT7530 TRGMII clocks */
+               core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+
+               /* Setup the MT7530 TRGMII Tx Clock */
+               core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+               core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+               core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+               core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+               core_write(priv, CORE_PLL_GROUP4,
+                          RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+                          RG_SYSPLL_BIAS_LPF_EN);
+               core_write(priv, CORE_PLL_GROUP2,
+                          RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+                          RG_SYSPLL_POSDIV(1));
+               core_write(priv, CORE_PLL_GROUP7,
+                          RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+                          RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+
+               /* Enable the MT7530 TRGMII clocks */
+               core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+       }
 
-       /* Disable MT7530 core and TRGMII Tx clocks */
-       core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
-                  REG_GSWCK_EN | REG_TRGMIICK_EN);
-
-       /* Setup the MT7530 TRGMII Tx Clock */
-       core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
-       core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
-       core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
-       core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
-       core_write(priv, CORE_PLL_GROUP4,
-                  RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
-                  RG_SYSPLL_BIAS_LPF_EN);
-       core_write(priv, CORE_PLL_GROUP2,
-                  RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
-                  RG_SYSPLL_POSDIV(1));
-       core_write(priv, CORE_PLL_GROUP7,
-                  RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
-                  RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-
-       /* Enable MT7530 core and TRGMII Tx clocks */
-       core_set(priv, CORE_TRGMII_GSW_CLK_CG,
-                REG_GSWCK_EN | REG_TRGMIICK_EN);
-
-       if (!trgint)
-               for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
-                       mt7530_rmw(priv, MT7530_TRGMII_RD(i),
-                                  RD_TAP_MASK, RD_TAP(16));
        return 0;
 }
 
@@ -2201,7 +2197,16 @@ mt7530_setup(struct dsa_switch *ds)
 
        mt7530_pll_setup(priv);
 
-       /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
+       /* Lower Tx driving for TRGMII path */
+       for (i = 0; i < NUM_TRGMII_CTRL; i++)
+               mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+                            TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+       for (i = 0; i < NUM_TRGMII_CTRL; i++)
+               mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+                          RD_TAP_MASK, RD_TAP(16));
+
+       /* Enable port 6 */
        val = mt7530_read(priv, MT7530_MHWTRAP);
        val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
        val |= MHWTRAP_MANUAL;
index 0a5d6c7bb128dfd8dab5b200f72d0a1f6c7396b1..30383c4f8fd0ec6ebe5d4063bea77f54d7c52496 100644 (file)
@@ -3549,7 +3549,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
                return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
        else if (chip->info->ops->set_max_frame_size)
                return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
-       return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+       return ETH_DATA_LEN;
 }
 
 static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -3557,6 +3557,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
        struct mv88e6xxx_chip *chip = ds->priv;
        int ret = 0;
 
+       /* For families where we don't know how to alter the MTU,
+        * just accept any value up to ETH_DATA_LEN
+        */
+       if (!chip->info->ops->port_set_jumbo_size &&
+           !chip->info->ops->set_max_frame_size) {
+               if (new_mtu > ETH_DATA_LEN)
+                       return -EINVAL;
+
+               return 0;
+       }
+
        if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
                new_mtu += EDSA_HLEN;
 
@@ -3565,9 +3576,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
                ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
        else if (chip->info->ops->set_max_frame_size)
                ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
-       else
-               if (new_mtu > 1522)
-                       ret = -EINVAL;
        mv88e6xxx_reg_unlock(chip);
 
        return ret;
index 8da79eedc057c2f68dbbe5cd3ff6656934f915b7..1d4f2f4d10f2967fda9d4a1a17f5b7e7b17111f8 100644 (file)
@@ -850,11 +850,20 @@ static int ena_set_channels(struct net_device *netdev,
        struct ena_adapter *adapter = netdev_priv(netdev);
        u32 count = channels->combined_count;
        /* The check for max value is already done in ethtool */
-       if (count < ENA_MIN_NUM_IO_QUEUES ||
-           (ena_xdp_present(adapter) &&
-           !ena_xdp_legal_queue_count(adapter, count)))
+       if (count < ENA_MIN_NUM_IO_QUEUES)
                return -EINVAL;
 
+       if (!ena_xdp_legal_queue_count(adapter, count)) {
+               if (ena_xdp_present(adapter))
+                       return -EINVAL;
+
+               xdp_clear_features_flag(netdev);
+       } else {
+               xdp_set_features_flag(netdev,
+                                     NETDEV_XDP_ACT_BASIC |
+                                     NETDEV_XDP_ACT_REDIRECT);
+       }
+
        return ena_update_queue_count(adapter, count);
 }
 
index d3999db7c6a29d1f6677ca9de4ff197ee28fda4b..cbfe7f977270f7f5134d766b552449a35bd65927 100644 (file)
@@ -4105,8 +4105,6 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
        /* Set offload features */
        ena_set_dev_offloads(feat, netdev);
 
-       netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
-
        adapter->max_mtu = feat->dev_attr.max_mtu;
        netdev->max_mtu = adapter->max_mtu;
        netdev->min_mtu = ENA_MIN_MTU;
@@ -4393,6 +4391,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        ena_config_debug_area(adapter);
 
+       if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+               netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+                                      NETDEV_XDP_ACT_REDIRECT;
+
        memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
 
        netif_carrier_off(netdev);
index 1e8d902e1c8eaa45df1ad5231299f10247919958..7f933175cbdac9aa643246190a0f6b297af19414 100644 (file)
@@ -412,6 +412,25 @@ int aq_xdp_xmit(struct net_device *dev, int num_frames,
        return num_frames - drop;
 }
 
+static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
+                                       struct net_device *dev,
+                                       struct aq_ring_buff_s *buff)
+{
+       struct xdp_frame *xdpf;
+       struct sk_buff *skb;
+
+       xdpf = xdp_convert_buff_to_frame(xdp);
+       if (unlikely(!xdpf))
+               return NULL;
+
+       skb = xdp_build_skb_from_frame(xdpf, dev);
+       if (!skb)
+               return NULL;
+
+       aq_get_rxpages_xdp(buff, xdp);
+       return skb;
+}
+
 static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
                                       struct xdp_buff *xdp,
                                       struct aq_ring_s *rx_ring,
@@ -431,7 +450,7 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
 
        prog = READ_ONCE(rx_ring->xdp_prog);
        if (!prog)
-               goto pass;
+               return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
 
        prefetchw(xdp->data_hard_start); /* xdp_frame write */
 
@@ -442,17 +461,12 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
        case XDP_PASS:
-pass:
-               xdpf = xdp_convert_buff_to_frame(xdp);
-               if (unlikely(!xdpf))
-                       goto out_aborted;
-               skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
+               skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
                if (!skb)
                        goto out_aborted;
                u64_stats_update_begin(&rx_ring->stats.rx.syncp);
                ++rx_ring->stats.rx.xdp_pass;
                u64_stats_update_end(&rx_ring->stats.rx.syncp);
-               aq_get_rxpages_xdp(buff, xdp);
                return skb;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
index 808236dc898b8d15fe41dc7f508fe40e87a13000..e2e2c986c82b705a71b14a8aff95ce1f9952e4d0 100644 (file)
@@ -6990,11 +6990,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
                if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
                        bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
        }
-       if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) {
+       if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
                bp->flags |= BNXT_FLAG_MULTI_HOST;
-               if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
-                       bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC;
-       }
+
        if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
                bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
 
index dcb09fbe4007814ab9fec7bb03f06c4d0460ebe5..c0628ac1b798afb7bd4bc5c32983887a1df05a81 100644 (file)
@@ -2000,6 +2000,8 @@ struct bnxt {
        u32                     fw_dbg_cap;
 
 #define BNXT_NEW_RM(bp)                ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
+#define BNXT_PTP_USE_RTC(bp)   (!BNXT_MH(bp) && \
+                                ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
        u32                     hwrm_spec_code;
        u16                     hwrm_cmd_seq;
        u16                     hwrm_cmd_kong_seq;
index 4ec8bba18cdd2c8abd58fc4649d1516f2507669a..a3a3978a4d1c257d343b425b1350dbef66c318ae 100644 (file)
@@ -63,7 +63,7 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
                                                ptp_info);
        u64 ns = timespec64_to_ns(ts);
 
-       if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+       if (BNXT_PTP_USE_RTC(ptp->bp))
                return bnxt_ptp_cfg_settime(ptp->bp, ns);
 
        spin_lock_bh(&ptp->ptp_lock);
@@ -196,7 +196,7 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
        struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
                                                ptp_info);
 
-       if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+       if (BNXT_PTP_USE_RTC(ptp->bp))
                return bnxt_ptp_adjphc(ptp, delta);
 
        spin_lock_bh(&ptp->ptp_lock);
@@ -205,34 +205,39 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
        return 0;
 }
 
+static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm)
+{
+       s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+       struct hwrm_port_mac_cfg_input *req;
+       int rc;
+
+       rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+       if (rc)
+               return rc;
+
+       req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+       req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+       rc = hwrm_req_send(bp, req);
+       if (rc)
+               netdev_err(bp->dev,
+                          "ptp adjfine failed. rc = %d\n", rc);
+       return rc;
+}
+
 static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
 {
        struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
                                                ptp_info);
-       struct hwrm_port_mac_cfg_input *req;
        struct bnxt *bp = ptp->bp;
-       int rc = 0;
 
-       if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) {
-               spin_lock_bh(&ptp->ptp_lock);
-               timecounter_read(&ptp->tc);
-               ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
-               spin_unlock_bh(&ptp->ptp_lock);
-       } else {
-               s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
-
-               rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
-               if (rc)
-                       return rc;
+       if (BNXT_PTP_USE_RTC(bp))
+               return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
 
-               req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
-               req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
-               rc = hwrm_req_send(ptp->bp, req);
-               if (rc)
-                       netdev_err(ptp->bp->dev,
-                                  "ptp adjfine failed. rc = %d\n", rc);
-       }
-       return rc;
+       spin_lock_bh(&ptp->ptp_lock);
+       timecounter_read(&ptp->tc);
+       ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
+       spin_unlock_bh(&ptp->ptp_lock);
+       return 0;
 }
 
 void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
@@ -879,7 +884,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
        u64 ns;
        int rc;
 
-       if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
+       if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp))
                return -ENODEV;
 
        if (!phc_cfg) {
@@ -932,13 +937,14 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
        atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
        spin_lock_init(&ptp->ptp_lock);
 
-       if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
+       if (BNXT_PTP_USE_RTC(bp)) {
                bnxt_ptp_timecounter_init(bp, false);
                rc = bnxt_ptp_init_rtc(bp, phc_cfg);
                if (rc)
                        goto out;
        } else {
                bnxt_ptp_timecounter_init(bp, true);
+               bnxt_ptp_adjfine_rtc(bp, 0);
        }
 
        ptp->ptp_info = bnxt_ptp_caps;
index 6e141a8bbf43cbcc36d6bd9eee78eb9d32d599d8..66e30561569eb3aa660fdf79a7c5b2f8af49e76f 100644 (file)
@@ -4990,7 +4990,7 @@ static int macb_probe(struct platform_device *pdev)
                bp->jumbo_max_len = macb_config->jumbo_max_len;
 
        bp->wol = 0;
-       if (of_get_property(np, "magic-packet", NULL))
+       if (of_property_read_bool(np, "magic-packet"))
                bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
        device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
index e5c71f90785237348ab57924037dea3e85f04f09..d8d71bf97983b859c68c95899f65fc90dc66bb69 100644 (file)
@@ -735,12 +735,17 @@ static int nicvf_set_channels(struct net_device *dev,
        if (channel->tx_count > nic->max_queues)
                return -EINVAL;
 
-       if (nic->xdp_prog &&
-           ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
-               netdev_err(nic->netdev,
-                          "XDP mode, RXQs + TXQs > Max %d\n",
-                          nic->max_queues);
-               return -EINVAL;
+       if (channel->tx_count + channel->rx_count > nic->max_queues) {
+               if (nic->xdp_prog) {
+                       netdev_err(nic->netdev,
+                                  "XDP mode, RXQs + TXQs > Max %d\n",
+                                  nic->max_queues);
+                       return -EINVAL;
+               }
+
+               xdp_clear_features_flag(nic->netdev);
+       } else if (!pass1_silicon(nic->pdev)) {
+               xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC);
        }
 
        if (if_up)
index 8b25313c7f6b8fa28d58cbfb50ff46515880c1f0..eff350e0bc2a8ec7d1f3584028d21762afecfe3d 100644 (file)
@@ -2218,7 +2218,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netdev->netdev_ops = &nicvf_netdev_ops;
        netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
 
-       netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+       if (!pass1_silicon(nic->pdev) &&
+           nic->rx_queues + nic->tx_queues <= nic->max_queues)
+               netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
 
        /* MTU range: 64 - 9200 */
        netdev->min_mtu = NIC_HW_MIN_FRS;
index b21e56de61671a90ef3906a03f98ff7c879a988d..05a89ab6766c4161d48ec9af79997bba1bc31f14 100644 (file)
@@ -1393,9 +1393,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
        if (!pdata)
                return ERR_PTR(-ENOMEM);
 
-       if (of_find_property(np, "davicom,ext-phy", NULL))
+       if (of_property_read_bool(np, "davicom,ext-phy"))
                pdata->flags |= DM9000_PLATF_EXT_PHY;
-       if (of_find_property(np, "davicom,no-eeprom", NULL))
+       if (of_property_read_bool(np, "davicom,no-eeprom"))
                pdata->flags |= DM9000_PLATF_NO_EEPROM;
 
        ret = of_get_mac_address(np, pdata->dev_addr);
index bca68edfbe9cd53c8128d91d1de9ac0fde2e1570..da9d4b310fcdd9b4211c649328ca5dbf8ba2ba49 100644 (file)
@@ -370,8 +370,7 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
 };
 
 static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
-                            struct ethtool_rmon_stats *s,
-                            const struct ethtool_rmon_hist_range **ranges)
+                            struct ethtool_rmon_stats *s)
 {
        s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
        s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
@@ -393,8 +392,6 @@ static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
        s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
        s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
        s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
-
-       *ranges = enetc_rmon_ranges;
 }
 
 static void enetc_get_eth_mac_stats(struct net_device *ndev,
@@ -447,13 +444,15 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
        struct enetc_hw *hw = &priv->si->hw;
        struct enetc_si *si = priv->si;
 
+       *ranges = enetc_rmon_ranges;
+
        switch (rmon_stats->src) {
        case ETHTOOL_MAC_STATS_SRC_EMAC:
-               enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+               enetc_rmon_stats(hw, 0, rmon_stats);
                break;
        case ETHTOOL_MAC_STATS_SRC_PMAC:
                if (si->hw_features & ENETC_SI_F_QBU)
-                       enetc_rmon_stats(hw, 1, rmon_stats, ranges);
+                       enetc_rmon_stats(hw, 1, rmon_stats);
                break;
        case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
                ethtool_aggregate_rmon_stats(ndev, rmon_stats);
index c73e25f8995eb9b0a95ba118a1ce007110e6f8ff..f3b16a6673e2bcb95d5944e87851708e0fd9943a 100644 (file)
@@ -4251,7 +4251,7 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_ipc_init;
 
-       if (of_get_property(np, "fsl,magic-packet", NULL))
+       if (of_property_read_bool(np, "fsl,magic-packet"))
                fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
 
        ret = fec_enet_init_stop_mode(fep, np);
index a7f4c3c29f3e41965609aed79e1a145478755b20..b88816b71ddff3dca52753d24d90817014d9924b 100644 (file)
@@ -937,7 +937,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 
        /* the 7-wire property means don't use MII mode */
-       if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
+       if (of_property_read_bool(np, "fsl,7-wire-mode")) {
                priv->seven_wire_mode = 1;
                dev_info(&ndev->dev, "using 7-wire PHY mode\n");
        }
index b2def295523ab5ce23b547fae1a21b7662c25f17..38d5013c6fedf5a654cf67eadbdae8eed5fb8ac1 100644 (file)
@@ -787,10 +787,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
        else
                priv->interface = gfar_get_interface(dev);
 
-       if (of_find_property(np, "fsl,magic-packet", NULL))
+       if (of_property_read_bool(np, "fsl,magic-packet"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 
-       if (of_get_property(np, "fsl,wake-on-filer", NULL))
+       if (of_property_read_bool(np, "fsl,wake-on-filer"))
                priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
 
        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
index ce574d097e280f9dbf52663661ebeffa4f7b8ba9..5f81470843b49658b882658eea8f75e109e9e5bc 100644 (file)
@@ -537,7 +537,10 @@ static int gve_get_link_ksettings(struct net_device *netdev,
                                  struct ethtool_link_ksettings *cmd)
 {
        struct gve_priv *priv = netdev_priv(netdev);
-       int err = gve_adminq_report_link_speed(priv);
+       int err = 0;
+
+       if (priv->link_speed == 0)
+               err = gve_adminq_report_link_speed(priv);
 
        cmd->base.speed = priv->link_speed;
        return err;
index daec9ce04531be2531a24d5b44bc83ad5dae3a2c..54bb4d9a0d1ea4230d8269dfc0184a0f7df93ff0 100644 (file)
@@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev)
        void __iomem *mpu_addr;
        void __iomem *ca_addr;
        u8 __iomem *eth_addr;
+       u8 mac[ETH_ALEN];
 
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
        ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
@@ -109,12 +110,13 @@ static int sni_82596_probe(struct platform_device *dev)
                goto probe_failed;
 
        /* someone seems to like messed up stuff */
-       netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
-       netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
-       netdevice->dev_addr[2] = readb(eth_addr + 0x09);
-       netdevice->dev_addr[3] = readb(eth_addr + 0x08);
-       netdevice->dev_addr[4] = readb(eth_addr + 0x07);
-       netdevice->dev_addr[5] = readb(eth_addr + 0x06);
+       mac[0] = readb(eth_addr + 0x0b);
+       mac[1] = readb(eth_addr + 0x0a);
+       mac[2] = readb(eth_addr + 0x09);
+       mac[3] = readb(eth_addr + 0x08);
+       mac[4] = readb(eth_addr + 0x07);
+       mac[5] = readb(eth_addr + 0x06);
+       eth_hw_addr_set(netdevice, mac);
        iounmap(eth_addr);
 
        if (netdevice->irq < 0) {
index 9b08e41ccc294dae821d17a438b0f82b6d15182b..c97095abd26abcd88c6ac40cb9307a9ac90a3a94 100644 (file)
@@ -2939,9 +2939,9 @@ static int emac_init_config(struct emac_instance *dev)
        }
 
        /* Fixup some feature bits based on the device tree */
-       if (of_get_property(np, "has-inverted-stacr-oc", NULL))
+       if (of_property_read_bool(np, "has-inverted-stacr-oc"))
                dev->features |= EMAC_FTR_STACR_OC_INVERT;
-       if (of_get_property(np, "has-new-stacr-staopc", NULL))
+       if (of_property_read_bool(np, "has-new-stacr-staopc"))
                dev->features |= EMAC_FTR_HAS_NEW_STACR;
 
        /* CAB lacks the appropriate properties */
@@ -3042,7 +3042,7 @@ static int emac_probe(struct platform_device *ofdev)
         * property here for now, but new flat device trees should set a
         * status property to "disabled" instead.
         */
-       if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
+       if (of_property_read_bool(np, "unused") || !of_device_is_available(np))
                return -ENODEV;
 
        /* Find ourselves in the bootlist if we are there */
@@ -3333,7 +3333,7 @@ static void __init emac_make_bootlist(void)
 
                if (of_match_node(emac_match, np) == NULL)
                        continue;
-               if (of_get_property(np, "unused", NULL))
+               if (of_property_read_bool(np, "unused"))
                        continue;
                idx = of_get_property(np, "cell-index", NULL);
                if (idx == NULL)
index 242ef976fd15e6bf7a9ed9ffd4b87a2d4ccee694..50358cf0013068c45749b3cd905d71cc8cd92ba1 100644 (file)
@@ -242,7 +242,7 @@ static int rgmii_probe(struct platform_device *ofdev)
        }
 
        /* Check for RGMII flags */
-       if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
+       if (of_property_read_bool(ofdev->dev.of_node, "has-mdio"))
                dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
 
        /* CAB lacks the right properties, fix this up */
index 467001db5070ed568a9d4be4f5e3c6d957c526cb..228cd502bb48a93238b3581907b864845db8de26 100644 (file)
@@ -15525,6 +15525,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
        int err;
        int v_idx;
 
+       pci_set_drvdata(pf->pdev, pf);
        pci_save_state(pf->pdev);
 
        /* set up periodic task facility */
index 924f972b91faf2f69ce78ff5c8bd1f208a048da0..72b091f2509d8b0330d2f26e311b3acbce4e3547 100644 (file)
@@ -171,10 +171,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
                                      struct i40e_fdir_filter *data)
 {
        bool is_vlan = !!data->vlan_tag;
-       struct vlan_hdr vlan;
-       struct ipv6hdr ipv6;
-       struct ethhdr eth;
-       struct iphdr ip;
+       struct vlan_hdr vlan = {};
+       struct ipv6hdr ipv6 = {};
+       struct ethhdr eth = {};
+       struct iphdr ip = {};
        u8 *tmp;
 
        if (ipv4) {
index 16c490965b61a58843ddaeeaef0601524ab6a7e3..dd11dbbd5551a2c1d9f8a431d972655d9cabe79c 100644 (file)
@@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
        /* Non Tunneled IPv6 */
        IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
        IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
-       IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+       IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
        IAVF_PTT_UNUSED_ENTRY(91),
        IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
        IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
index 3273aeb8fa67631421def6a71bab6faaef0e5d91..095201e83c9db002fc0667c24635dd68790f1a9a 100644 (file)
@@ -893,6 +893,10 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
 
+       /* Do not track VLAN 0 filter, always added by the PF on VF init */
+       if (!vid)
+               return 0;
+
        if (!VLAN_FILTERING_ALLOWED(adapter))
                return -EIO;
 
@@ -919,6 +923,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
 
+       /* We do not track VLAN 0 filter */
+       if (!vid)
+               return 0;
+
        iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
        if (proto == cpu_to_be16(ETH_P_8021Q))
                clear_bit(vid, adapter->vsi.active_cvlans);
@@ -5066,6 +5074,11 @@ static void iavf_remove(struct pci_dev *pdev)
                        mutex_unlock(&adapter->crit_lock);
                        break;
                }
+               /* Simply return if we already went through iavf_shutdown */
+               if (adapter->state == __IAVF_REMOVE) {
+                       mutex_unlock(&adapter->crit_lock);
+                       return;
+               }
 
                mutex_unlock(&adapter->crit_lock);
                usleep_range(500, 1000);
index 18b6a702a1d6dff9e9aeac001526444a6ca38736..e989feda133c1e63447f005437f09ba248084fdb 100644 (file)
@@ -1096,7 +1096,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
                cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
                            IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
-       if (ring->netdev->features & NETIF_F_RXHASH)
+       if (!(ring->netdev->features & NETIF_F_RXHASH))
                return;
 
        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
index 6d23338604bb33e07f2df55f3656296ed4db24b0..4e17d006c52d46930a5a349928ddf7c8a6c81af5 100644 (file)
@@ -2446,8 +2446,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                        if (f->is_new_vlan) {
                                f->is_new_vlan = false;
-                               if (!f->vlan.vid)
-                                       continue;
                                if (f->vlan.tpid == ETH_P_8021Q)
                                        set_bit(f->vlan.vid,
                                                adapter->vsi.active_cvlans);
index b0e29e34240185b3b4aa1d6bc32c74f95adedc4b..e809249500e18b15b4a339289bf7a8fca0bc323c 100644 (file)
@@ -509,6 +509,7 @@ enum ice_pf_flags {
        ICE_FLAG_VF_VLAN_PRUNING,
        ICE_FLAG_LINK_LENIENT_MODE_ENA,
        ICE_FLAG_PLUG_AUX_DEV,
+       ICE_FLAG_UNPLUG_AUX_DEV,
        ICE_FLAG_MTU_CHANGED,
        ICE_FLAG_GNSS,                  /* GNSS successfully initialized */
        ICE_PF_FLAGS_NBITS              /* must be last */
@@ -955,16 +956,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
  */
 static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 {
-       /* We can directly unplug aux device here only if the flag bit
-        * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
-        * could race with ice_plug_aux_dev() called from
-        * ice_service_task(). In this case we only clear that bit now and
-        * aux device will be unplugged later once ice_plug_aux_device()
-        * called from ice_service_task() finishes (see ice_service_task()).
+       /* defer unplug to service task to avoid RTNL lock and
+        * clear PLUG bit so that pending plugs don't interfere
         */
-       if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
-               ice_unplug_aux_dev(pf);
-
+       clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
+       set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
        clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
 }
 #endif /* _ICE_H_ */
index 0f52ea38b6f3a2a49fdf5420a2b395cc1b27a16e..450317dfcca7360cb1acdfca470bd4295d11e3db 100644 (file)
@@ -291,6 +291,7 @@ static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
        struct ice_vsi_ctx *ctxt;
        int status;
 
+       ice_fltr_remove_all(vsi);
        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt)
                return;
@@ -2892,7 +2893,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
            !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
                ice_cfg_sw_lldp(vsi, false, false);
 
-       ice_fltr_remove_all(vsi);
        ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
        err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
        if (err)
index 567694bf098ba52684899ad593e6df448653a68b..0d8b8c6f9bd35184467df4680f9f5bc42e14760e 100644 (file)
@@ -2316,18 +2316,15 @@ static void ice_service_task(struct work_struct *work)
                }
        }
 
-       if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
-               /* Plug aux device per request */
-               ice_plug_aux_dev(pf);
+       /* unplug aux dev per request, if an unplug request came in
+        * while processing a plug request, this will handle it
+        */
+       if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
+               ice_unplug_aux_dev(pf);
 
-               /* Mark plugging as done but check whether unplug was
-                * requested during ice_plug_aux_dev() call
-                * (e.g. from ice_clear_rdma_cap()) and if so then
-                * plug aux device.
-                */
-               if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
-                       ice_unplug_aux_dev(pf);
-       }
+       /* Plug aux device per request */
+       if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+               ice_plug_aux_dev(pf);
 
        if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
                struct iidc_event *event;
@@ -4644,6 +4641,12 @@ static int ice_start_eth(struct ice_vsi *vsi)
        return err;
 }
 
+static void ice_stop_eth(struct ice_vsi *vsi)
+{
+       ice_fltr_remove_all(vsi);
+       ice_vsi_close(vsi);
+}
+
 static int ice_init_eth(struct ice_pf *pf)
 {
        struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -5132,7 +5135,7 @@ void ice_unload(struct ice_pf *pf)
 {
        ice_deinit_features(pf);
        ice_deinit_rdma(pf);
-       ice_vsi_close(ice_get_main_vsi(pf));
+       ice_stop_eth(ice_get_main_vsi(pf));
        ice_vsi_decfg(ice_get_main_vsi(pf));
        ice_deinit_dev(pf);
 }
index 96a64c25e2ef7f7d796c55bbd46e6a68bc84fd74..0cc05e54a78154307bf7d9ed32f8ce6e4c31be35 100644 (file)
@@ -1341,15 +1341,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
        struct ice_vf *vf;
        int ret;
 
+       vf = ice_get_vf_by_id(pf, vf_id);
+       if (!vf)
+               return -EINVAL;
+
        if (ice_is_eswitch_mode_switchdev(pf)) {
                dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
                return -EOPNOTSUPP;
        }
 
-       vf = ice_get_vf_by_id(pf, vf_id);
-       if (!vf)
-               return -EINVAL;
-
        ret = ice_check_vf_ready_for_cfg(vf);
        if (ret)
                goto out_put_vf;
index dfd22862e926ba28a163c765a6f287ffb9b5e611..b61dd9f015405983ea2a8f029c30f5fc2ed964dd 100644 (file)
@@ -1210,6 +1210,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                                ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
                        if (++ntc == cnt)
                                ntc = 0;
+                       rx_ring->first_desc = ntc;
                        continue;
                }
 
index 31565bbafa224f9cf2035f8c6439ed58f7bcd65a..d1e489da7363f26ecfb723fcc75423a8faa1efdd 100644 (file)
@@ -184,8 +184,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
        }
        netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
 
-       ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
        ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
        err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
        if (err)
@@ -200,10 +198,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
                if (err)
                        return err;
        }
+       ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
        err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
        if (err)
                return err;
-       ice_clean_rx_ring(rx_ring);
 
        ice_qvec_toggle_napi(vsi, q_vector, false);
        ice_qp_clean_rings(vsi, q_idx);
index 03bc1e8af575f72a6fd3a97cad6b15c2cf5dad09..274c781b554739fce9a98a551f9dfe4776515c29 100644 (file)
@@ -109,6 +109,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void igb_remove(struct pci_dev *pdev);
+static void igb_init_queue_configuration(struct igb_adapter *adapter);
 static int igb_sw_init(struct igb_adapter *);
 int igb_open(struct net_device *);
 int igb_close(struct net_device *);
@@ -175,9 +176,7 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter);
 
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
-static int igb_disable_sriov(struct pci_dev *dev);
-static int igb_pci_disable_sriov(struct pci_dev *dev);
+static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
 #endif
 
 static int igb_suspend(struct device *);
@@ -3665,7 +3664,7 @@ err_sw_init:
        kfree(adapter->shadow_vfta);
        igb_clear_interrupt_scheme(adapter);
 #ifdef CONFIG_PCI_IOV
-       igb_disable_sriov(pdev);
+       igb_disable_sriov(pdev, false);
 #endif
        pci_iounmap(pdev, adapter->io_addr);
 err_ioremap:
@@ -3679,7 +3678,38 @@ err_dma:
 }
 
 #ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+       struct net_device *netdev = pci_get_drvdata(dev);
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct pci_dev *pdev = adapter->pdev;
+
+       rtnl_lock();
+
+       if (netif_running(netdev))
+               igb_close(netdev);
+       else
+               igb_reset(adapter);
+
+       igb_clear_interrupt_scheme(adapter);
+
+       igb_init_queue_configuration(adapter);
+
+       if (igb_init_interrupt_scheme(adapter, true)) {
+               rtnl_unlock();
+               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               return -ENOMEM;
+       }
+
+       if (netif_running(netdev))
+               igb_open(netdev);
+
+       rtnl_unlock();
+
+       return 0;
+}
+
+static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3713,10 +3743,10 @@ static int igb_disable_sriov(struct pci_dev *pdev)
                adapter->flags |= IGB_FLAG_DMAC;
        }
 
-       return 0;
+       return reinit ? igb_sriov_reinit(pdev) : 0;
 }
 
-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3781,12 +3811,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
                        "Unable to allocate memory for VF MAC filter list\n");
        }
 
-       /* only call pci_enable_sriov() if no VFs are allocated already */
-       if (!old_vfs) {
-               err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
-               if (err)
-                       goto err_out;
-       }
        dev_info(&pdev->dev, "%d VFs allocated\n",
                 adapter->vfs_allocated_count);
        for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -3794,6 +3818,17 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
 
        /* DMA Coalescing is not supported in IOV mode. */
        adapter->flags &= ~IGB_FLAG_DMAC;
+
+       if (reinit) {
+               err = igb_sriov_reinit(pdev);
+               if (err)
+                       goto err_out;
+       }
+
+       /* only call pci_enable_sriov() if no VFs are allocated already */
+       if (!old_vfs)
+               err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+
        goto out;
 
 err_out:
@@ -3863,9 +3898,7 @@ static void igb_remove(struct pci_dev *pdev)
        igb_release_hw_control(adapter);
 
 #ifdef CONFIG_PCI_IOV
-       rtnl_lock();
-       igb_disable_sriov(pdev);
-       rtnl_unlock();
+       igb_disable_sriov(pdev, false);
 #endif
 
        unregister_netdev(netdev);
@@ -3911,7 +3944,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
        igb_reset_interrupt_capability(adapter);
 
        pci_sriov_set_totalvfs(pdev, 7);
-       igb_enable_sriov(pdev, max_vfs);
+       igb_enable_sriov(pdev, max_vfs, false);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -9520,71 +9553,17 @@ static void igb_shutdown(struct pci_dev *pdev)
        }
 }
 
-#ifdef CONFIG_PCI_IOV
-static int igb_sriov_reinit(struct pci_dev *dev)
-{
-       struct net_device *netdev = pci_get_drvdata(dev);
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       struct pci_dev *pdev = adapter->pdev;
-
-       rtnl_lock();
-
-       if (netif_running(netdev))
-               igb_close(netdev);
-       else
-               igb_reset(adapter);
-
-       igb_clear_interrupt_scheme(adapter);
-
-       igb_init_queue_configuration(adapter);
-
-       if (igb_init_interrupt_scheme(adapter, true)) {
-               rtnl_unlock();
-               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
-               return -ENOMEM;
-       }
-
-       if (netif_running(netdev))
-               igb_open(netdev);
-
-       rtnl_unlock();
-
-       return 0;
-}
-
-static int igb_pci_disable_sriov(struct pci_dev *dev)
-{
-       int err = igb_disable_sriov(dev);
-
-       if (!err)
-               err = igb_sriov_reinit(dev);
-
-       return err;
-}
-
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
-{
-       int err = igb_enable_sriov(dev, num_vfs);
-
-       if (err)
-               goto out;
-
-       err = igb_sriov_reinit(dev);
-       if (!err)
-               return num_vfs;
-
-out:
-       return err;
-}
-
-#endif
 static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
 {
 #ifdef CONFIG_PCI_IOV
-       if (num_vfs == 0)
-               return igb_pci_disable_sriov(dev);
-       else
-               return igb_pci_enable_sriov(dev, num_vfs);
+       int err;
+
+       if (num_vfs == 0) {
+               return igb_disable_sriov(dev, true);
+       } else {
+               err = igb_enable_sriov(dev, num_vfs, true);
+               return err ? err : num_vfs;
+       }
 #endif
        return 0;
 }
index 3a32809510fc6b771c2a350e5bed9aa3a5bb1a48..72cb1b56e9f2400b5408d4a722171ffcb363e4b9 100644 (file)
@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
                          igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
                          netdev);
        if (err)
-               goto out;
+               goto free_irq_tx;
 
        adapter->rx_ring->itr_register = E1000_EITR(vector);
        adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
        err = request_irq(adapter->msix_entries[vector].vector,
                          igbvf_msix_other, 0, netdev->name, netdev);
        if (err)
-               goto out;
+               goto free_irq_rx;
 
        igbvf_configure_msix(adapter);
        return 0;
+free_irq_rx:
+       free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+       free_irq(adapter->msix_entries[--vector].vector, netdev);
 out:
        return err;
 }
index b8ba3f94c363229b230d3c8dbe1a882149e6e265..a47a2e3e548cf3f2e5241ca3180e816f7ebb626a 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2009 - 2018 Intel Corporation. */
 
+#include <linux/etherdevice.h>
+
 #include "vf.h"
 
 static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                /* set our "perm_addr" based on info provided by PF */
                ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
                if (!ret_val) {
-                       if (msgbuf[0] == (E1000_VF_RESET |
-                                         E1000_VT_MSGTYPE_ACK))
+                       switch (msgbuf[0]) {
+                       case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
                                memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
-                       else
+                               break;
+                       case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+                               eth_zero_addr(hw->mac.perm_addr);
+                               break;
+                       default:
                                ret_val = -E1000_ERR_MAC_INIT;
+                       }
                }
        }
 
index 2928a6c73692871a225cab659a0c0c5adf99af8b..25fc6c65209bf948887311ab10ae65d02a2b0ae9 100644 (file)
@@ -6010,18 +6010,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
                if (e->command != TC_TAPRIO_CMD_SET_GATES)
                        return false;
 
-               for (i = 0; i < adapter->num_tx_queues; i++) {
-                       if (e->gate_mask & BIT(i))
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       if (e->gate_mask & BIT(i)) {
                                queue_uses[i]++;
 
-                       /* There are limitations: A single queue cannot be
-                        * opened and closed multiple times per cycle unless the
-                        * gate stays open. Check for it.
-                        */
-                       if (queue_uses[i] > 1 &&
-                           !(prev->gate_mask & BIT(i)))
-                               return false;
-               }
+                               /* There are limitations: A single queue cannot
+                                * be opened and closed multiple times per cycle
+                                * unless the gate stays open. Check for it.
+                                */
+                               if (queue_uses[i] > 1 &&
+                                   !(prev->gate_mask & BIT(i)))
+                                       return false;
+                       }
        }
 
        return true;
index 9b4ecbe4f36d41b3855b70243de9e4a4994b0ea7..3ea00bc9b91caf1fcf293e11448281f48fe3edee 100644 (file)
@@ -4996,6 +4996,14 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
 
        for (i = 0; i < priv->port_count; i++) {
                port = priv->port_list[i];
+               if (percpu && port->ntxqs >= num_possible_cpus() * 2)
+                       xdp_set_features_flag(port->dev,
+                                             NETDEV_XDP_ACT_BASIC |
+                                             NETDEV_XDP_ACT_REDIRECT |
+                                             NETDEV_XDP_ACT_NDO_XMIT);
+               else
+                       xdp_clear_features_flag(port->dev);
+
                mvpp2_swf_bm_pool_init(port);
                if (status[i])
                        mvpp2_open(port->dev);
@@ -6863,13 +6871,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 
        if (!port->priv->percpu_pools)
                mvpp2_set_hw_csum(port, port->pool_long->id);
+       else if (port->ntxqs >= num_possible_cpus() * 2)
+               dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+                                   NETDEV_XDP_ACT_REDIRECT |
+                                   NETDEV_XDP_ACT_NDO_XMIT;
 
        dev->vlan_features |= features;
        netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
 
-       dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
-                           NETDEV_XDP_ACT_NDO_XMIT;
-
        dev->priv_flags |= IFF_UNICAST_FLT;
 
        /* MTU range: 68 - 9704 */
index 7f8ffbf79cf742905b2d6e5cfbd426a18b8f7bd8..ab126f8706c7491b1aee2e22c366ee1cb63ed1a2 100644 (file)
@@ -709,6 +709,7 @@ err_unreg_netdev:
 err_ptp_destroy:
        otx2_ptp_destroy(vf);
 err_detach_rsrc:
+       free_percpu(vf->hw.lmt_info);
        if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
                qmem_free(vf->dev, vf->dync_lmt);
        otx2_detach_resources(&vf->mbox);
@@ -762,6 +763,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
        otx2_shutdown_tc(vf);
        otx2vf_disable_mbox_intr(vf);
        otx2_detach_resources(&vf->mbox);
+       free_percpu(vf->hw.lmt_info);
        if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
                qmem_free(vf->dev, vf->dync_lmt);
        otx2vf_vfaf_mbox_destroy(vf);
index b65de174c3d9b1c2a9d6d410a4db07762cb880e6..084a6badef6d9b4973a7413f474c6f9b68e31fb3 100644 (file)
 #define SGMII_SEND_AN_ERROR_EN         BIT(11)
 #define SGMII_IF_MODE_MASK             GENMASK(5, 1)
 
+/* Register to reset SGMII design */
+#define SGMII_RESERVED_0       0x34
+#define SGMII_SW_RESET         BIT(0)
+
 /* Register to set SGMII speed, ANA RG_ Control Signals III*/
 #define SGMSYS_ANA_RG_CS3      0x2028
 #define RG_PHY_SPEED_MASK      (BIT(2) | BIT(3))
index bb00de1003ac4355808b6805b3221270a7bfb909..83976dc86887589488e9769a4e7fc1f582523609 100644 (file)
@@ -38,20 +38,16 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
                          const unsigned long *advertising,
                          bool permit_pause_to_mac)
 {
+       bool mode_changed = false, changed, use_an;
        struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
        unsigned int rgc3, sgm_mode, bmcr;
        int advertise, link_timer;
-       bool changed, use_an;
 
        advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
                                                             advertising);
        if (advertise < 0)
                return advertise;
 
-       link_timer = phylink_get_link_timer_ns(interface);
-       if (link_timer < 0)
-               return link_timer;
-
        /* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
         * we assume that fixes it's speed at bitrate = line rate (in
         * other words, 1000Mbps or 2500Mbps).
@@ -77,17 +73,24 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
        }
 
        if (use_an) {
-               /* FIXME: Do we need to set AN_RESTART here? */
-               bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE;
+               bmcr = SGMII_AN_ENABLE;
        } else {
                bmcr = 0;
        }
 
        if (mpcs->interface != interface) {
+               link_timer = phylink_get_link_timer_ns(interface);
+               if (link_timer < 0)
+                       return link_timer;
+
                /* PHYA power down */
                regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
                                   SGMII_PHYA_PWD, SGMII_PHYA_PWD);
 
+               /* Reset SGMII PCS state */
+               regmap_update_bits(mpcs->regmap, SGMII_RESERVED_0,
+                                  SGMII_SW_RESET, SGMII_SW_RESET);
+
                if (interface == PHY_INTERFACE_MODE_2500BASEX)
                        rgc3 = RG_PHY_SPEED_3_125G;
                else
@@ -97,16 +100,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
                regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
                                   RG_PHY_SPEED_3_125G, rgc3);
 
+               /* Setup the link timer */
+               regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
+
                mpcs->interface = interface;
+               mode_changed = true;
        }
 
        /* Update the advertisement, noting whether it has changed */
        regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
                                 SGMII_ADVERTISE, advertise, &changed);
 
-       /* Setup the link timer and QPHY power up inside SGMIISYS */
-       regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
-
        /* Update the sgmsys mode register */
        regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
                           SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
@@ -114,7 +118,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
 
        /* Update the BMCR */
        regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
-                          SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
+                          SGMII_AN_ENABLE, bmcr);
 
        /* Release PHYA power down state
         * Only removing bit SGMII_PHYA_PWD isn't enough.
@@ -128,7 +132,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
        usleep_range(50, 100);
        regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
 
-       return changed;
+       return changed || mode_changed;
 }
 
 static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
index 0869d4fff17b1aa49dff0d4a6b21a8868afbd30d..4b5e459b6d49fee01508e0fff1e203ba3d1cee74 100644 (file)
@@ -674,7 +674,7 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
        struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
 
        if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
-               return -EOPNOTSUPP;
+               return -ENODATA;
 
        *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
                                          mlx4_en_get_cqe_ts(_ctx->cqe));
@@ -686,7 +686,7 @@ int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
        struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
 
        if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
-               return -EOPNOTSUPP;
+               return -ENODATA;
 
        *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
        return 0;
index 88460b7796e5574176a1e4f229cd0541050ac02d..4a19ef4a9811066988897c632e19e37e45d38b2e 100644 (file)
@@ -313,7 +313,6 @@ struct mlx5e_params {
                } channel;
        } mqprio;
        bool rx_cqe_compress_def;
-       bool tunneled_offload_en;
        struct dim_cq_moder rx_cq_moderation;
        struct dim_cq_moder tx_cq_moderation;
        struct mlx5e_packet_merge_param packet_merge;
@@ -1243,6 +1242,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);
 
+void mlx5e_set_xdp_feature(struct net_device *netdev);
 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
                                       struct net_device *netdev,
                                       netdev_features_t features);
index c4378afdec09e43c02813e3a2f88ccc93170e56e..1bd1c94fb977669ee01600feff6091247fcafbc7 100644 (file)
@@ -178,7 +178,6 @@ tc_act_police_stats(struct mlx5e_priv *priv,
        meter = mlx5e_tc_meter_get(priv->mdev, &params);
        if (IS_ERR(meter)) {
                NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
-               mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
                return PTR_ERR(meter);
        }
 
index 626cb7470fa572bb540ff1d83b300b6fccc3e950..07c1895a2b23414f2a9b5bf5e1454757951624c3 100644 (file)
@@ -64,6 +64,7 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
 {
        struct mlx5e_tc_act_stats *act_stats, *old_act_stats;
        struct rhashtable *ht = &handle->ht;
+       u64 lastused;
        int err = 0;
 
        act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL);
@@ -73,6 +74,10 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
        act_stats->tc_act_cookie = act_cookie;
        act_stats->counter = counter;
 
+       mlx5_fc_query_cached_raw(counter,
+                                &act_stats->lastbytes,
+                                &act_stats->lastpackets, &lastused);
+
        rcu_read_lock();
        old_act_stats = rhashtable_lookup_get_insert_fast(ht,
                                                          &act_stats->hash,
index bcd6370de440f2d3c9f22509750c51c3899c382b..c5dae48b7932f7a6216f55cb3b14faa7d167e1c8 100644 (file)
@@ -162,7 +162,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
        const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
 
        if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
-               return -EOPNOTSUPP;
+               return -ENODATA;
 
        *timestamp =  mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
                                         _ctx->rq->clock, get_cqe_ts(_ctx->cqe));
@@ -174,7 +174,7 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
        const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
 
        if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
-               return -EOPNOTSUPP;
+               return -ENODATA;
 
        *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
        return 0;
index 4be770443b0cd2a22df63c07c34ebbec4847678a..9b597cb2459851aa9a3dd46f4757e94ba87c7f70 100644 (file)
@@ -621,15 +621,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        if (unlikely(!priv_rx))
                return -ENOMEM;
 
-       dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
-       if (IS_ERR(dek)) {
-               err = PTR_ERR(dek);
-               goto err_create_key;
-       }
-       priv_rx->dek = dek;
-
-       INIT_LIST_HEAD(&priv_rx->list);
-       spin_lock_init(&priv_rx->lock);
        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                priv_rx->crypto_info.crypto_info_128 =
@@ -642,9 +633,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        default:
                WARN_ONCE(1, "Unsupported cipher type %u\n",
                          crypto_info->cipher_type);
-               return -EOPNOTSUPP;
+               err = -EOPNOTSUPP;
+               goto err_cipher_type;
        }
 
+       dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+       if (IS_ERR(dek)) {
+               err = PTR_ERR(dek);
+               goto err_cipher_type;
+       }
+       priv_rx->dek = dek;
+
+       INIT_LIST_HEAD(&priv_rx->list);
+       spin_lock_init(&priv_rx->lock);
+
        rxq = mlx5e_ktls_sk_get_rxq(sk);
        priv_rx->rxq = rxq;
        priv_rx->sk = sk;
@@ -677,7 +679,7 @@ err_post_wqes:
        mlx5e_tir_destroy(&priv_rx->tir);
 err_create_tir:
        mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
-err_create_key:
+err_cipher_type:
        kfree(priv_rx);
        return err;
 }
index 60b3e08a10286e2285262a6435dacb0ea5265d07..0e4c0a093293a7e8872a7c751bfd8d5373f80935 100644 (file)
@@ -469,14 +469,6 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
        if (IS_ERR(priv_tx))
                return PTR_ERR(priv_tx);
 
-       dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
-       if (IS_ERR(dek)) {
-               err = PTR_ERR(dek);
-               goto err_create_key;
-       }
-       priv_tx->dek = dek;
-
-       priv_tx->expected_seq = start_offload_tcp_sn;
        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                priv_tx->crypto_info.crypto_info_128 =
@@ -489,8 +481,18 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
        default:
                WARN_ONCE(1, "Unsupported cipher type %u\n",
                          crypto_info->cipher_type);
-               return -EOPNOTSUPP;
+               err = -EOPNOTSUPP;
+               goto err_pool_push;
        }
+
+       dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+       if (IS_ERR(dek)) {
+               err = PTR_ERR(dek);
+               goto err_pool_push;
+       }
+
+       priv_tx->dek = dek;
+       priv_tx->expected_seq = start_offload_tcp_sn;
        priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
 
        mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
@@ -500,7 +502,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 
        return 0;
 
-err_create_key:
+err_pool_push:
        pool_push(pool, priv_tx);
        return err;
 }
index 08d0929e82603679709d4d425d50a8ff3a95bdc9..33b3620ea45c25f7563cb5b177c7d9575fcac9df 100644 (file)
@@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc {
 };
 
 struct mlx5e_macsec_umr {
+       u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
        dma_addr_t dma_addr;
-       u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
        u32 mkey;
 };
 
@@ -1412,6 +1412,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
        struct mlx5e_macsec_aso *aso;
        struct mlx5_aso_wqe *aso_wqe;
        struct mlx5_aso *maso;
+       unsigned long expires;
        int err;
 
        aso = &macsec->aso;
@@ -1425,7 +1426,13 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
        macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
 
        mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
-       err = mlx5_aso_poll_cq(maso, false);
+       expires = jiffies + msecs_to_jiffies(10);
+       do {
+               err = mlx5_aso_poll_cq(maso, false);
+               if (err)
+                       usleep_range(2, 10);
+       } while (err && time_is_after_jiffies(expires));
+
        if (err)
                goto err_out;
 
index 2449731b7d79a715c3d07a923bb4719cf299bd25..89de92d0648363a899793f1aa5ea4fb324a9ddc8 100644 (file)
@@ -117,12 +117,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return -EOPNOTSUPP;
 
-       ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
-       for (i = 0; i < ets->ets_cap; i++) {
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
                if (err)
                        return err;
+       }
 
+       ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+       for (i = 0; i < ets->ets_cap; i++) {
                err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
                if (err)
                        return err;
index 7708acc9b2ab3a274f444e63bdfbb7278ded6ed5..79fd21ecb9cbc70c7ff47d54e7433265b608439e 100644 (file)
@@ -1985,6 +1985,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_params new_params;
+       int err;
 
        if (enable) {
                /* Checking the regular RQ here; mlx5e_validate_xsk_param called
@@ -2005,7 +2006,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
        mlx5e_set_rq_type(mdev, &new_params);
 
-       return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+       err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+       if (err)
+               return err;
+
+       /* update XDP supported features */
+       mlx5e_set_xdp_feature(netdev);
+
+       return 0;
 }
 
 static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
index 76a9c5194a7046a6867414d9240ae9dbc97db68f..7ca7e9b57607fdfa2484cf374d5b2cfa7f731bf3 100644 (file)
@@ -4004,6 +4004,25 @@ static int mlx5e_handle_feature(struct net_device *netdev,
        return 0;
 }
 
+void mlx5e_set_xdp_feature(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_params *params = &priv->channels.params;
+       xdp_features_t val;
+
+       if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+               xdp_clear_features_flag(netdev);
+               return;
+       }
+
+       val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+             NETDEV_XDP_ACT_XSK_ZEROCOPY |
+             NETDEV_XDP_ACT_NDO_XMIT;
+       if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
+               val |= NETDEV_XDP_ACT_RX_SG;
+       xdp_set_features_flag(netdev, val);
+}
+
 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 {
        netdev_features_t oper_features = features;
@@ -4030,6 +4049,9 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
                return -EINVAL;
        }
 
+       /* update XDP supported features */
+       mlx5e_set_xdp_feature(netdev);
+
        return 0;
 }
 
@@ -4128,8 +4150,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                }
        }
 
-       if (mlx5e_is_uplink_rep(priv))
+       if (mlx5e_is_uplink_rep(priv)) {
                features = mlx5e_fix_uplink_rep_features(netdev, features);
+               features |= NETIF_F_NETNS_LOCAL;
+       } else {
+               features &= ~NETIF_F_NETNS_LOCAL;
+       }
 
        mutex_unlock(&priv->state_lock);
 
@@ -4147,13 +4173,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
                struct xsk_buff_pool *xsk_pool =
                        mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
                struct mlx5e_xsk_param xsk;
+               int max_xdp_mtu;
 
                if (!xsk_pool)
                        continue;
 
                mlx5e_build_xsk_param(xsk_pool, &xsk);
+               max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
 
-               if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
+               /* Validate XSK params and XDP MTU in advance */
+               if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
+                   new_params->sw_mtu > max_xdp_mtu) {
                        u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
                        int max_mtu_frame, max_mtu_page, max_mtu;
 
@@ -4163,9 +4193,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
                         */
                        max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
                        max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
-                       max_mtu = min(max_mtu_frame, max_mtu_page);
+                       max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
 
-                       netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
+                       netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
                                   new_params->sw_mtu, ix, max_mtu);
                        return false;
                }
@@ -4761,13 +4791,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
        if (old_prog)
                bpf_prog_put(old_prog);
 
-       if (reset) {
-               if (prog)
-                       xdp_features_set_redirect_target(netdev, true);
-               else
-                       xdp_features_clear_redirect_target(netdev);
-       }
-
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
                goto unlock;
 
@@ -4964,8 +4987,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
        /* TX inline */
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 
-       params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
-
        /* AF_XDP */
        params->xsk = xsk;
 
@@ -5163,13 +5184,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        netdev->features         |= NETIF_F_HIGHDMA;
        netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
 
-       netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
-                              NETDEV_XDP_ACT_XSK_ZEROCOPY |
-                              NETDEV_XDP_ACT_RX_SG;
-
        netdev->priv_flags       |= IFF_UNICAST_FLT;
 
        netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
+       mlx5e_set_xdp_feature(netdev);
        mlx5e_set_netdev_dev_addr(netdev);
        mlx5e_macsec_build_netdev(priv);
        mlx5e_ipsec_build_netdev(priv);
@@ -5241,6 +5259,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
                mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
 
        mlx5e_health_create_reporters(priv);
+       /* update XDP supported features */
+       mlx5e_set_xdp_feature(netdev);
+
        return 0;
 }
 
@@ -5270,7 +5291,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
        }
 
        features = MLX5E_RX_RES_FEATURE_PTP;
-       if (priv->channels.params.tunneled_offload_en)
+       if (mlx5_tunnel_inner_ft_supported(mdev))
                features |= MLX5E_RX_RES_FEATURE_INNER_FT;
        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
                                priv->max_nch, priv->drop_rq.rqn,
index 9b92034430854759bc1d50c002c1b3810c99a542..8ff654b4e9e14101a5e0e65e0e6a32e22c48431a 100644 (file)
@@ -747,12 +747,14 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
        /* RQ */
        mlx5e_build_rq_params(mdev, params);
 
+       /* update XDP supported features */
+       mlx5e_set_xdp_feature(netdev);
+
        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
        params->mqprio.num_tc       = 1;
-       params->tunneled_offload_en = false;
        if (rep->vport != MLX5_VPORT_UPLINK)
                params->vlan_strip_disable = true;
 
index 70b8d2dfa751f92267ebd5b7e094f95d181ca93a..87a2850b32d09baa5d0dc945825f1178118ba975 100644 (file)
@@ -1103,8 +1103,8 @@ static void
 mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
                          struct mlx5_core_dev *mdev)
 {
+       u32 link_speed = 0;
        u64 link_speed64;
-       u32 link_speed;
 
        hairpin_params->mdev = mdev;
        /* set hairpin pair per each 50Gbs share of the link */
@@ -3752,7 +3752,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
        parse_attr->filter_dev = attr->parse_attr->filter_dev;
        attr2->action = 0;
        attr2->counter = NULL;
-       attr->tc_act_cookies_count = 0;
+       attr2->tc_act_cookies_count = 0;
        attr2->flags = 0;
        attr2->parse_attr = parse_attr;
        attr2->dest_chain = 0;
@@ -4304,6 +4304,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
 
        esw_attr->dest_int_port = dest_int_port;
        esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+       esw_attr->split_count = out_index;
 
        /* Forward to root fdb for matching against the new source vport */
        attr->dest_chain = 0;
@@ -5304,8 +5305,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
        mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
 
        tc->action_stats_handle = mlx5e_tc_act_stats_create();
-       if (IS_ERR(tc->action_stats_handle))
+       if (IS_ERR(tc->action_stats_handle)) {
+               err = PTR_ERR(tc->action_stats_handle);
                goto err_act_stats;
+       }
 
        return 0;
 
@@ -5440,8 +5443,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
        }
 
        uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
-       if (IS_ERR(uplink_priv->action_stats_handle))
+       if (IS_ERR(uplink_priv->action_stats_handle)) {
+               err = PTR_ERR(uplink_priv->action_stats_handle);
                goto err_action_counter;
+       }
 
        return 0;
 
@@ -5463,6 +5468,16 @@ err_tun_mapping:
 
 void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
 {
+       struct mlx5e_rep_priv *rpriv;
+       struct mlx5_eswitch *esw;
+       struct mlx5e_priv *priv;
+
+       rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+       priv = netdev_priv(rpriv->netdev);
+       esw = priv->mdev->priv.eswitch;
+
+       mlx5e_tc_clean_fdb_peer_flows(esw);
+
        mlx5e_tc_tun_cleanup(uplink_priv->encap);
 
        mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
index d55775627a473a698638b2b2fff4e0c7e55a195b..50d2ea32397982884bb6618d1b1da53bf7ca53d5 100644 (file)
@@ -364,8 +364,7 @@ int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vpo
 
        if (WARN_ON_ONCE(IS_ERR(vport))) {
                esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
-               err = PTR_ERR(vport);
-               goto out;
+               return PTR_ERR(vport);
        }
 
        esw_acl_ingress_ofld_rules_destroy(esw, vport);
index 0f052513fefa104d882355dece4fdbb264d8277d..8bdf28762f41261118cabb26461a86879a42e227 100644 (file)
@@ -959,6 +959,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
         */
        esw_vport_change_handle_locked(vport);
        vport->enabled_events = 0;
+       esw_apply_vport_rx_mode(esw, vport, false, false);
        esw_vport_cleanup(esw, vport);
        esw->enabled_vports--;
 
index d766a64b18234ea810e1e268991fedfb2d5cbbe3..25a8076a77bff081900591d1748773983018594d 100644 (file)
@@ -723,11 +723,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        for (i = 0; i < esw_attr->split_count; i++) {
-               if (esw_is_indir_table(esw, attr))
-                       err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
-               else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
-                       err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
-                                                              &i);
+               if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+                       /* Source port rewrite (forward to ovs internal port or statck device) isn't
+                        * supported in the rule of split action.
+                        */
+                       err = -EOPNOTSUPP;
                else
                        esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
 
@@ -3405,6 +3405,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
        return 0;
 }
 
+static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
+{
+       struct net *devl_net, *netdev_net;
+       struct mlx5_eswitch *esw;
+
+       esw = mlx5_devlink_eswitch_get(devlink);
+       netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
+       devl_net = devlink_net(devlink);
+
+       return net_eq(devl_net, netdev_net);
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                  struct netlink_ext_ack *extack)
 {
@@ -3419,6 +3431,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
+       if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+           !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+               return -EPERM;
+       }
+
        mlx5_lag_disable_change(esw->dev);
        err = mlx5_esw_try_lock(esw);
        if (err < 0) {
index c2a4f86bc8909526703840ff8566e2d7a74a0971..baa7ef812313996be093323cb1465af85ef8a5f1 100644 (file)
@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 
        params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
-       params->tunneled_offload_en = false;
 
        /* CQE compression is not supported for IPoIB */
        params->rx_cqe_compress_def = false;
index 540840e80493b083e9d784caa40ef78df4b4f731..f1de152a61135844f291af44a69d04df60a96bb6 100644 (file)
@@ -1364,8 +1364,8 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 {
        mlx5_devlink_traps_unregister(priv_to_devlink(dev));
        mlx5_sf_dev_table_destroy(dev);
-       mlx5_sriov_detach(dev);
        mlx5_eswitch_disable(dev->priv.eswitch);
+       mlx5_sriov_detach(dev);
        mlx5_lag_remove_mdev(dev);
        mlx5_ec_cleanup(dev);
        mlx5_sf_hw_table_destroy(dev);
@@ -1789,11 +1789,11 @@ static void remove_one(struct pci_dev *pdev)
        struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
        struct devlink *devlink = priv_to_devlink(dev);
 
+       set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
        /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
         * fw_reset before unregistering the devlink.
         */
        mlx5_drain_fw_reset(dev);
-       set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
        devlink_unregister(devlink);
        mlx5_sriov_disable(pdev);
        mlx5_crdump_disable(dev);
index 64d4e7125e9bb5e1da1246a8af27a2bab1bb0f4b..95dc67fb300157aa8071d40ce667d0e43dd1b7af 100644 (file)
@@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct
        return func_id <= mlx5_core_max_vfs(dev) ?  MLX5_VF : MLX5_SF;
 }
 
+static u32 mlx5_get_ec_function(u32 function)
+{
+       return function >> 16;
+}
+
+static u32 mlx5_get_func_id(u32 function)
+{
+       return function & 0xffff;
+}
+
 static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
 {
        struct rb_root *root;
@@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void)
 }
 
 static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
-                                  struct rb_root *root, u16 func_id)
+                                  struct rb_root *root, u32 function)
 {
        u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
        unsigned long end = jiffies + recl_pages_to_jiffies;
 
        while (!RB_EMPTY_ROOT(root)) {
+               u32 ec_function = mlx5_get_ec_function(function);
+               u32 function_id = mlx5_get_func_id(function);
                int nclaimed;
                int err;
 
-               err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
-                                   &nclaimed, false, mlx5_core_is_ecpf(dev));
+               err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
+                                   &nclaimed, false, ec_function);
                if (err) {
-                       mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
-                                      err, func_id);
+                       mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
+                                      err, function_id, ec_function);
                        return err;
                }
 
index c5240d38c9dbd83ca87f4b8802144cb7ed4a2018..09ed6e5fa6c34e6db76d160d36cb5df28b67ac8d 100644 (file)
@@ -105,7 +105,6 @@ struct mlxsw_thermal {
        struct thermal_zone_device *tzdev;
        int polling_delay;
        struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
-       u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
        struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
        struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
        struct mlxsw_thermal_area line_cards[];
@@ -468,7 +467,7 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
                return idx;
 
        /* Normalize the state to the valid speed range. */
-       state = thermal->cooling_levels[state];
+       state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
        mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
        err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
        if (err) {
@@ -859,10 +858,6 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
                }
        }
 
-       /* Initialize cooling levels per PWM state. */
-       for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
-               thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
-
        thermal->polling_delay = bus_info->low_frequency ?
                                 MLXSW_THERMAL_SLOW_POLL_INT :
                                 MLXSW_THERMAL_POLL_INT;
index a8f94b7544eeab04c8c53d74337245f7e6ebaf17..02a327744a61b6c84319b7d79804966e612e7dcc 100644 (file)
@@ -2937,6 +2937,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
 
 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
 {
+       refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
        mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
        mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
        mutex_init(&mlxsw_sp->parsing.lock);
@@ -2945,6 +2946,7 @@ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
 static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
 {
        mutex_destroy(&mlxsw_sp->parsing.lock);
+       WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
 }
 
 struct mlxsw_sp_ipv6_addr_node {
index 045a24cacfa517e5ef79add5528876e2da93a8dd..b6ee2d658b0c43970a921e8cb73519fc9d046b16 100644 (file)
@@ -1354,7 +1354,7 @@ static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
                                           u16 vid)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       u8 local_port = mlxsw_sp_port->local_port;
+       u16 local_port = mlxsw_sp_port->local_port;
        int err;
 
        /* In case there are no {Port, VID} => FID mappings on the port,
@@ -1391,7 +1391,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
                                  struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       u8 local_port = mlxsw_sp_port->local_port;
+       u16 local_port = mlxsw_sp_port->local_port;
 
        mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
        mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
index 09e32778b012d18c4a15de40b025dbbd901abf8c..4a73e2fe95ef90422ea4bf7efaf7fd05942bd1ac 100644 (file)
@@ -10381,11 +10381,23 @@ err_reg_write:
                                              old_inc_parsing_depth);
        return err;
 }
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
+
+       mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
+                                             false);
+}
 #else
 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
 {
        return 0;
 }
+
+static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
+{
+}
 #endif
 
 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
@@ -10615,6 +10627,7 @@ err_register_inet6addr_notifier:
 err_register_inetaddr_notifier:
        mlxsw_core_flush_owq();
 err_dscp_init:
+       mlxsw_sp_mp_hash_fini(mlxsw_sp);
 err_mp_hash_init:
        mlxsw_sp_neigh_fini(mlxsw_sp);
 err_neigh_init:
@@ -10655,6 +10668,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
        unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
        unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
        mlxsw_core_flush_owq();
+       mlxsw_sp_mp_hash_fini(mlxsw_sp);
        mlxsw_sp_neigh_fini(mlxsw_sp);
        mlxsw_sp_lb_rif_fini(mlxsw_sp);
        mlxsw_sp_vrs_fini(mlxsw_sp);
index bdb893476832bd6da6032f23742be033fa741f52..d0e6cd8dbe5c94c666823be41f670bd8bc02c827 100644 (file)
@@ -258,6 +258,7 @@ struct ocelot_stat_layout {
 struct ocelot_stats_region {
        struct list_head node;
        u32 base;
+       enum ocelot_stat first_stat;
        int count;
        u32 *buf;
 };
@@ -273,6 +274,7 @@ static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS]
        OCELOT_STAT(RX_ASSEMBLY_OK),
        OCELOT_STAT(RX_MERGE_FRAGMENTS),
        OCELOT_STAT(TX_MERGE_FRAGMENTS),
+       OCELOT_STAT(TX_MM_HOLD),
        OCELOT_STAT(RX_PMAC_OCTETS),
        OCELOT_STAT(RX_PMAC_UNICAST),
        OCELOT_STAT(RX_PMAC_MULTICAST),
@@ -341,11 +343,12 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
  */
 static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
 {
-       unsigned int idx = port * OCELOT_NUM_STATS;
        struct ocelot_stats_region *region;
        int j;
 
        list_for_each_entry(region, &ocelot->stats_regions, node) {
+               unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat;
+
                for (j = 0; j < region->count; j++) {
                        u64 *stat = &ocelot->stats[idx + j];
                        u64 val = region->buf[j];
@@ -355,8 +358,6 @@ static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
 
                        *stat = (*stat & ~(u64)U32_MAX) + val;
                }
-
-               idx += region->count;
        }
 }
 
@@ -899,7 +900,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
                if (!layout[i].reg)
                        continue;
 
-               if (region && layout[i].reg == last + 4) {
+               if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] ==
+                   ocelot->map[SYS][last & REG_MASK] + 4) {
                        region->count++;
                } else {
                        region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -914,6 +916,7 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
                        WARN_ON(last >= layout[i].reg);
 
                        region->base = layout[i].reg;
+                       region->first_stat = i;
                        region->count = 1;
                        list_add_tail(&region->node, &ocelot->stats_regions);
                }
index d17d1b4f2585fed3e16ab0208a441a3aaf5bc025..825356ee3492ec62375f85255009bb51a04317ce 100644 (file)
@@ -292,7 +292,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
         */
 
        laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
-       if (!laddr) {
+       if (dma_mapping_error(lp->device, laddr)) {
                pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -509,7 +509,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
 
        *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
                                   SONIC_RBSIZE, DMA_FROM_DEVICE);
-       if (!*new_addr) {
+       if (dma_mapping_error(lp->device, *new_addr)) {
                dev_kfree_skb(*new_skb);
                *new_skb = NULL;
                return false;
index d61cd32ec3b6575d15b399de16d222314f794c55..86a93cac26470d9ae9feff3cf70006123892ea1e 100644 (file)
@@ -5083,6 +5083,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
 
        num_vports = p_hwfn->qm_info.num_vports;
 
+       if (num_vports < 2) {
+               DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
+               return -EINVAL;
+       }
+
        /* Accounting for the vports which are configured for WFQ explicitly */
        for (i = 0; i < num_vports; i++) {
                u32 tmp_speed;
index 6190adf965bcab448a81a474d4faec0f6bb4eaa5..f55eed092f25d293a6ed567a3c93ad7da1af3429 100644 (file)
@@ -422,7 +422,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
        if (p_time->hour > 23)
                p_time->hour = 0;
        if (p_time->min > 59)
-               p_time->hour = 0;
+               p_time->min = 0;
        if (p_time->msec > 999)
                p_time->msec = 0;
        if (p_time->usec > 999)
index 2bf18748581d3d155775a0bc23c0ec2fd32c6f98..fa167b1aa01909a84719d3fe126d49648566a225 100644 (file)
@@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
        }
 
        vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+       if (!vf)
+               return -EINVAL;
+
        vport_id = vf->vport_id;
 
        return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5152,7 +5155,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
 
                /* Validate that the VF has a configured vport */
                vf = qed_iov_get_vf_info(hwfn, i, true);
-               if (!vf->vport_instance)
+               if (!vf || !vf->vport_instance)
                        continue;
 
                memset(&params, 0, sizeof(params));
index 3115b2c128980db23bf85851fa9724a5359f0509..eaa50050aa0b798a4ab1cfa201a388d32f88b527 100644 (file)
@@ -724,9 +724,15 @@ static int emac_remove(struct platform_device *pdev)
        struct net_device *netdev = dev_get_drvdata(&pdev->dev);
        struct emac_adapter *adpt = netdev_priv(netdev);
 
+       netif_carrier_off(netdev);
+       netif_tx_disable(netdev);
+
        unregister_netdev(netdev);
        netif_napi_del(&adpt->rx_q.napi);
 
+       free_irq(adpt->irq.irq, &adpt->irq);
+       cancel_work_sync(&adpt->work_thread);
+
        emac_clks_teardown(adpt);
 
        put_device(&adpt->phydev->mdio.dev);
index 0f54849a38235fe7e564f7fe870d501201f6fbc5..894e2690c64372a2beb3ab13907722e823b3ffef 100644 (file)
@@ -1455,8 +1455,6 @@ static int ravb_phy_init(struct net_device *ndev)
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
        }
 
-       /* Indicate that the MAC is responsible for managing PHY PM */
-       phydev->mac_managed_pm = true;
        phy_attached_info(phydev);
 
        return 0;
@@ -2379,6 +2377,8 @@ static int ravb_mdio_init(struct ravb_private *priv)
 {
        struct platform_device *pdev = priv->pdev;
        struct device *dev = &pdev->dev;
+       struct phy_device *phydev;
+       struct device_node *pn;
        int error;
 
        /* Bitbang init */
@@ -2400,6 +2400,14 @@ static int ravb_mdio_init(struct ravb_private *priv)
        if (error)
                goto out_free_bus;
 
+       pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
+       phydev = of_phy_find_device(pn);
+       if (phydev) {
+               phydev->mac_managed_pm = true;
+               put_device(&phydev->mdio.dev);
+       }
+       of_node_put(pn);
+
        return 0;
 
 out_free_bus:
index 853394e5bb8b9834959003ec23e014d960930f84..c4f93d24c6a4231589ed390dcaf862fb22314eba 100644 (file)
@@ -702,13 +702,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
        u16 pkt_len;
        u32 get_ts;
 
+       if (*quota <= 0)
+               return true;
+
        boguscnt = min_t(int, gq->ring_size, *quota);
        limit = boguscnt;
 
        desc = &gq->rx_ring[gq->cur];
        while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
-               if (--boguscnt < 0)
-                       break;
                dma_rmb();
                pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
                skb = gq->skbs[gq->cur];
@@ -734,6 +735,9 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 
                gq->cur = rswitch_next_queue_index(gq, true, 1);
                desc = &gq->rx_ring[gq->cur];
+
+               if (--boguscnt <= 0)
+                       break;
        }
 
        num = rswitch_get_num_cur_queues(gq);
@@ -745,7 +749,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
                goto err;
        gq->dirty = rswitch_next_queue_index(gq, false, num);
 
-       *quota -= limit - (++boguscnt);
+       *quota -= limit - boguscnt;
 
        return boguscnt <= 0;
 
@@ -1437,7 +1441,10 @@ static int rswitch_open(struct net_device *ndev)
        rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
        rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
 
-       iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+       if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+               iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+
+       bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
 
        return 0;
 };
@@ -1448,8 +1455,10 @@ static int rswitch_stop(struct net_device *ndev)
        struct rswitch_gwca_ts_info *ts_info, *ts_info2;
 
        netif_tx_stop_all_queues(ndev);
+       bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
 
-       iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+       if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
+               iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
 
        list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
                if (ts_info->port != rdev->port)
index 27d3d38c055f0643897e2756d5fb4ca6618bd6b6..b3e0411b408ef887b72c2ad0f343273a1f085178 100644 (file)
@@ -998,6 +998,7 @@ struct rswitch_private {
        struct rcar_gen4_ptp_private *ptp_priv;
 
        struct rswitch_device *rdev[RSWITCH_NUM_PORTS];
+       DECLARE_BITMAP(opened_ports, RSWITCH_NUM_PORTS);
 
        struct rswitch_gwca gwca;
        struct rswitch_etha etha[RSWITCH_NUM_PORTS];
index ed17163d781144f7d41aca9a356f66e4f6ca3879..d8ec729825be44fe5735eb0333100ef0eebab260 100644 (file)
@@ -2029,8 +2029,6 @@ static int sh_eth_phy_init(struct net_device *ndev)
        if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
                phy_set_max_speed(phydev, SPEED_100);
 
-       /* Indicate that the MAC is responsible for managing PHY PM */
-       phydev->mac_managed_pm = true;
        phy_attached_info(phydev);
 
        return 0;
@@ -3097,6 +3095,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
        struct bb_info *bitbang;
        struct platform_device *pdev = mdp->pdev;
        struct device *dev = &mdp->pdev->dev;
+       struct phy_device *phydev;
+       struct device_node *pn;
 
        /* create bit control struct for PHY */
        bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
@@ -3133,6 +3133,14 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
        if (ret)
                goto out_free_bus;
 
+       pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
+       phydev = of_phy_find_device(pn);
+       if (phydev) {
+               phydev->mac_managed_pm = true;
+               put_device(&phydev->mdio.dev);
+       }
+       of_node_put(pn);
+
        return 0;
 
 out_free_bus:
index 6b5d96bced475f5aac8eab8130988c7451bf6b72..ec9c130276d8925614733a7fb739845d6c5aa199 100644 (file)
@@ -418,6 +418,7 @@ struct dma_features {
        unsigned int frpbs;
        unsigned int frpes;
        unsigned int addr64;
+       unsigned int host_dma_width;
        unsigned int rssen;
        unsigned int vlhash;
        unsigned int sphen;
index ac8580f501e2ef07d2db27f5185e8ace9d5cdb3d..2a2be65d65a03c37d1ba182fe42e9efd125b4087 100644 (file)
@@ -213,8 +213,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
        struct device_node *np = dev->of_node;
        int err = 0;
 
-       if (of_get_property(np, "snps,rmii_refclk_ext", NULL))
-               dwmac->rmii_refclk_ext = true;
+       dwmac->rmii_refclk_ext = of_property_read_bool(np, "snps,rmii_refclk_ext");
 
        dwmac->clk_tx = devm_clk_get(dev, "tx");
        if (IS_ERR(dwmac->clk_tx)) {
@@ -289,7 +288,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
                goto err_parse_dt;
        }
 
-       plat_dat->addr64 = dwmac->ops->addr_width;
+       plat_dat->host_dma_width = dwmac->ops->addr_width;
        plat_dat->init = imx_dwmac_init;
        plat_dat->exit = imx_dwmac_exit;
        plat_dat->clks_config = imx_dwmac_clks_config;
index 7deb1f817dacc564fb9c4453a4fba3a6fb6c51bd..13aa919633b47db9d7cfecf16a26bb2f64c19d0c 100644 (file)
@@ -684,7 +684,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
 
        intel_priv->is_pse = true;
        plat->bus_id = 2;
-       plat->addr64 = 32;
+       plat->host_dma_width = 32;
 
        plat->clk_ptp_rate = 200000000;
 
@@ -725,7 +725,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
 
        intel_priv->is_pse = true;
        plat->bus_id = 3;
-       plat->addr64 = 32;
+       plat->host_dma_width = 32;
 
        plat->clk_ptp_rate = 200000000;
 
index 2f7d8e4561d920cf8e2dc7524c6eed9b9dbcbb00..9ae31e3dc82187c8abd6fe3fcaa959d36c84f207 100644 (file)
@@ -591,7 +591,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
        plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
        plat->riwt_off = 1;
        plat->maxmtu = ETH_DATA_LEN;
-       plat->addr64 = priv_plat->variant->dma_bit_mask;
+       plat->host_dma_width = priv_plat->variant->dma_bit_mask;
        plat->bsp_priv = priv_plat;
        plat->init = mediatek_dwmac_init;
        plat->clks_config = mediatek_dwmac_clks_config;
index 8f543c3ab5c564257fa3cca3327383323fd48c24..17310ade88ddaf64d04801e6f1267b978a5f2148 100644 (file)
@@ -1431,7 +1431,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
 
-       if (priv->dma_cap.addr64 <= 32)
+       if (priv->dma_cap.host_dma_width <= 32)
                gfp |= GFP_DMA32;
 
        if (!buf->page) {
@@ -4587,7 +4587,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
        unsigned int entry = rx_q->dirty_rx;
        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
 
-       if (priv->dma_cap.addr64 <= 32)
+       if (priv->dma_cap.host_dma_width <= 32)
                gfp |= GFP_DMA32;
 
        while (dirty-- > 0) {
@@ -6205,7 +6205,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
        seq_printf(seq, "\tFlexible RX Parser: %s\n",
                   priv->dma_cap.frpsel ? "Y" : "N");
        seq_printf(seq, "\tEnhanced Addressing: %d\n",
-                  priv->dma_cap.addr64);
+                  priv->dma_cap.host_dma_width);
        seq_printf(seq, "\tReceive Side Scaling: %s\n",
                   priv->dma_cap.rssen ? "Y" : "N");
        seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
@@ -7178,20 +7178,22 @@ int stmmac_dvr_probe(struct device *device,
                dev_info(priv->device, "SPH feature enabled\n");
        }
 
-       /* The current IP register MAC_HW_Feature1[ADDR64] only define
-        * 32/40/64 bit width, but some SOC support others like i.MX8MP
-        * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
-        * So overwrite dma_cap.addr64 according to HW real design.
+       /* Ideally our host DMA address width is the same as for the
+        * device. However, it may differ and then we have to use our
+        * host DMA width for allocation and the device DMA width for
+        * register handling.
         */
-       if (priv->plat->addr64)
-               priv->dma_cap.addr64 = priv->plat->addr64;
+       if (priv->plat->host_dma_width)
+               priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
+       else
+               priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
 
-       if (priv->dma_cap.addr64) {
+       if (priv->dma_cap.host_dma_width) {
                ret = dma_set_mask_and_coherent(device,
-                               DMA_BIT_MASK(priv->dma_cap.addr64));
+                               DMA_BIT_MASK(priv->dma_cap.host_dma_width));
                if (!ret) {
-                       dev_info(priv->device, "Using %d bits DMA width\n",
-                                priv->dma_cap.addr64);
+                       dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
+                                priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
 
                        /*
                         * If more than 32 bits can be addressed, make sure to
@@ -7206,7 +7208,7 @@ int stmmac_dvr_probe(struct device *device,
                                goto error_hw_init;
                        }
 
-                       priv->dma_cap.addr64 = 32;
+                       priv->dma_cap.host_dma_width = 32;
                }
        }
 
index 8addee6d04bd803636c0e14577220332371632c2..734a817d3c945ed5e6c5ff349760508ff6c5c670 100644 (file)
@@ -287,6 +287,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
        hp = mdesc_grab();
 
+       if (!hp)
+               return -ENODEV;
+
        rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
        err = -ENODEV;
        if (!rmac) {
index e6144d963eaaadb3343906fac020b38f1734533f..ab8b09a9ef61d62dbd71c0ee14332dde971a7e26 100644 (file)
@@ -9271,7 +9271,7 @@ static int niu_get_of_props(struct niu *np)
        if (model)
                strcpy(np->vpd.model, model);
 
-       if (of_find_property(dp, "hot-swappable-phy", NULL)) {
+       if (of_property_read_bool(dp, "hot-swappable-phy")) {
                np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
                        NIU_FLAGS_HOTPLUG_PHY);
        }
index fe86fbd585861c6069533d50c9e8d7f9f0b3a814..e220620d0ffc9070cf07c063a09ccbe1ced18a37 100644 (file)
@@ -433,6 +433,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
        hp = mdesc_grab();
 
+       if (!hp)
+               return -ENODEV;
+
        vp = vnet_find_parent(hp, vdev->mp, vdev);
        if (IS_ERR(vp)) {
                pr_err("Cannot find port parent vnet\n");
index 16ee9c29cb35a5c77bfc186d7903a70519060b7b..8caf85acbb6af1221a6b4a9d35bb99ac3c0645d5 100644 (file)
@@ -636,6 +636,10 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
                val = lower_32_bits(cycles);
                am65_cpts_write32(cpts, val, genf[req->index].length);
 
+               am65_cpts_write32(cpts, 0, genf[req->index].control);
+               am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
+               am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);
+
                cpts->genf_enable |= BIT(req->index);
        } else {
                am65_cpts_write32(cpts, 0, genf[req->index].length);
index e8f38e3f7706eb69e11bf4b82cbba9afbb938b09..25e707d7b87ce320e7b3f68328890ec610d39b6b 100644 (file)
@@ -226,8 +226,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
        if (IS_ERR(priv->gmii_sel))
                return PTR_ERR(priv->gmii_sel);
 
-       if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
-               priv->rmii_clock_external = true;
+       priv->rmii_clock_external = of_property_read_bool(pdev->dev.of_node, "rmii-clock-ext");
 
        dev_set_drvdata(&pdev->dev, priv);
 
index 751fb0bc65c501cb7b4df87c44a453bc10e05c5d..2adf82a32bf6a6b90e4461325c2964fe52eacd0d 100644 (file)
@@ -3583,13 +3583,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        /* init the hw stats lock */
        spin_lock_init(&gbe_dev->hw_stats_lock);
 
-       if (of_find_property(node, "enable-ale", NULL)) {
-               gbe_dev->enable_ale = true;
+       gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale");
+       if (gbe_dev->enable_ale)
                dev_info(dev, "ALE enabled\n");
-       } else {
-               gbe_dev->enable_ale = false;
+       else
                dev_dbg(dev, "ALE bypass enabled*\n");
-       }
 
        ret = of_property_read_u32(node, "tx-queue",
                                   &gbe_dev->tx_queue_id);
index cf8de8a7a8a1eb0ef0c79ebd69daeb6305783ffc..9d535ae5962662106654894dca8bd81b4455e3b5 100644 (file)
@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
 
        /* set up the hardware pointers in each descriptor */
        for (i = 0; i < no; i++, descr++) {
+               dma_addr_t cpu_addr;
+
                gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
-               descr->bus_addr =
-                       dma_map_single(ctodev(card), descr,
-                                      GELIC_DESCR_SIZE,
-                                      DMA_BIDIRECTIONAL);
 
-               if (!descr->bus_addr)
+               cpu_addr = dma_map_single(ctodev(card), descr,
+                                         GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+               if (dma_mapping_error(ctodev(card), cpu_addr))
                        goto iommu_error;
 
+               descr->bus_addr = cpu_to_be32(cpu_addr);
                descr->next = descr + 1;
                descr->prev = descr - 1;
        }
@@ -365,26 +367,28 @@ iommu_error:
  *
  * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
  * Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
  */
 static int gelic_descr_prepare_rx(struct gelic_card *card,
                                  struct gelic_descr *descr)
 {
+       static const unsigned int rx_skb_size =
+               ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+               GELIC_NET_RXBUF_ALIGN - 1;
+       dma_addr_t cpu_addr;
        int offset;
-       unsigned int bufsize;
 
        if (gelic_descr_get_status(descr) !=  GELIC_DESCR_DMA_NOT_IN_USE)
                dev_info(ctodev(card), "%s: ERROR status\n", __func__);
-       /* we need to round up the buffer size to a multiple of 128 */
-       bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
 
-       /* and we need to have it 128 byte aligned, therefore we allocate a
-        * bit more */
-       descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+       descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
        if (!descr->skb) {
                descr->buf_addr = 0; /* tell DMAC don't touch memory */
                return -ENOMEM;
        }
-       descr->buf_size = cpu_to_be32(bufsize);
+       descr->buf_size = cpu_to_be32(rx_skb_size);
        descr->dmac_cmd_status = 0;
        descr->result_size = 0;
        descr->valid_size = 0;
@@ -395,11 +399,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
        if (offset)
                skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
        /* io-mmu-map the skb */
-       descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
-                                                    descr->skb->data,
-                                                    GELIC_NET_MAX_MTU,
-                                                    DMA_FROM_DEVICE));
-       if (!descr->buf_addr) {
+       cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+                                 GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+       descr->buf_addr = cpu_to_be32(cpu_addr);
+       if (dma_mapping_error(ctodev(card), cpu_addr)) {
                dev_kfree_skb_any(descr->skb);
                descr->skb = NULL;
                dev_info(ctodev(card),
@@ -779,7 +782,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
 
        buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
 
-       if (!buf) {
+       if (dma_mapping_error(ctodev(card), buf)) {
                dev_err(ctodev(card),
                        "dma map 2 failed (%p, %i). Dropping packet\n",
                        skb->data, skb->len);
@@ -915,7 +918,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
        data_error = be32_to_cpu(descr->data_error);
        /* unmap skb buffer */
        dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
-                        GELIC_NET_MAX_MTU,
+                        GELIC_NET_MAX_FRAME,
                         DMA_FROM_DEVICE);
 
        skb_put(skb, be32_to_cpu(descr->valid_size)?
index 68f324ed4eaf0841072efd46c999455c6c11a8e6..0d98defb011ed7976c88be928f656c634ef3c23f 100644 (file)
@@ -19,8 +19,9 @@
 #define GELIC_NET_RX_DESCRIPTORS        128 /* num of descriptors */
 #define GELIC_NET_TX_DESCRIPTORS        128 /* num of descriptors */
 
-#define GELIC_NET_MAX_MTU               VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU               VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME             2312
+#define GELIC_NET_MAX_MTU               2294
+#define GELIC_NET_MIN_MTU               64
 #define GELIC_NET_RXBUF_ALIGN           128
 #define GELIC_CARD_RX_CSUM_DEFAULT      1 /* hw chksum */
 #define GELIC_NET_WATCHDOG_TIMEOUT      5*HZ
index a502812ac418a6e26aab5d03a4ead85a0ce38900..86f7843b4591c6a8e48e6225059bd68d76b91da7 100644 (file)
@@ -2709,8 +2709,7 @@ static int velocity_get_platform_info(struct velocity_info *vptr)
        struct resource res;
        int ret;
 
-       if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
-               vptr->no_eeprom = 1;
+       vptr->no_eeprom = of_property_read_bool(vptr->dev->of_node, "no-eeprom");
 
        ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
        if (ret) {
index ffdac6fac05499d993debc1fec9f125960b04253..f64ed39b93d8312a5711515243c1408aa2944e46 100644 (file)
@@ -1383,7 +1383,7 @@ struct velocity_info {
        struct device *dev;
        struct pci_dev *pdev;
        struct net_device *netdev;
-       int no_eeprom;
+       bool no_eeprom;
 
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        u8 ip_addr[4];
index 1066420d6a83aa55ff6689329b44c0b57ffa37a4..e0ac1bcd9925c2ebfe122ff1faf9a40479a0afdb 100644 (file)
@@ -1455,12 +1455,11 @@ static int temac_probe(struct platform_device *pdev)
         * endianness mode.  Default for OF devices is big-endian.
         */
        little_endian = false;
-       if (temac_np) {
-               if (of_get_property(temac_np, "little-endian", NULL))
-                       little_endian = true;
-       } else if (pdata) {
+       if (temac_np)
+               little_endian = of_property_read_bool(temac_np, "little-endian");
+       else if (pdata)
                little_endian = pdata->reg_little_endian;
-       }
+
        if (little_endian) {
                lp->temac_ior = _temac_ior_le;
                lp->temac_iow = _temac_iow_le;
index 894e92ef415b98b761af99fa917838b0f640e8fb..9f505cf02d9651d63e3c393ad954690f41a595ee 100644 (file)
@@ -503,6 +503,11 @@ static void
 xirc2ps_detach(struct pcmcia_device *link)
 {
     struct net_device *dev = link->priv;
+    struct local_info *local = netdev_priv(dev);
+
+    netif_carrier_off(dev);
+    netif_tx_disable(dev);
+    cancel_work_sync(&local->tx_timeout_task);
 
     dev_dbg(&link->dev, "detach\n");
 
index 1412b67304c8e9a12671d84a51c428b2a39660cb..1651fbad4bd54afa42301ba08f5d1bb0c4e01f15 100644 (file)
@@ -15,6 +15,14 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
        switch (reg_id) {
        case INTER_EE_SRC_CH_IRQ_MSK:
        case INTER_EE_SRC_EV_CH_IRQ_MSK:
+               return gsi->version >= IPA_VERSION_3_5;
+
+       case HW_PARAM_2:
+               return gsi->version >= IPA_VERSION_3_5_1;
+
+       case HW_PARAM_4:
+               return gsi->version >= IPA_VERSION_5_0;
+
        case CH_C_CNTXT_0:
        case CH_C_CNTXT_1:
        case CH_C_CNTXT_2:
@@ -43,7 +51,6 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
        case CH_CMD:
        case EV_CH_CMD:
        case GENERIC_CMD:
-       case HW_PARAM_2:
        case CNTXT_TYPE_IRQ:
        case CNTXT_TYPE_IRQ_MSK:
        case CNTXT_SRC_CH_IRQ:
index f62f0a5c653d1a6689f898bf6c67124147e8ac7d..48fde65fa2e8a59d8ed82e3535f0d7cff4677380 100644 (file)
 
 #include <linux/bits.h>
 
+struct platform_device;
+
+struct gsi;
+
 /**
  * DOC: GSI Registers
  *
index 735fa659160979316feaeb3ffd3e89d41e48a2f5..3f475428ddddb467fe287be3b11d77423b99af0d 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
  */
 
 #include <linux/io.h>
@@ -15,6 +15,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
        enum ipa_version version = ipa->version;
 
        switch (reg_id) {
+       case FILT_ROUT_HASH_EN:
+               return version == IPA_VERSION_4_2;
+
+       case FILT_ROUT_HASH_FLUSH:
+               return version < IPA_VERSION_5_0 && version != IPA_VERSION_4_2;
+
+       case FILT_ROUT_CACHE_FLUSH:
+       case ENDP_FILTER_CACHE_CFG:
+       case ENDP_ROUTER_CACHE_CFG:
+               return version >= IPA_VERSION_5_0;
+
        case IPA_BCR:
        case COUNTER_CFG:
                return version < IPA_VERSION_4_5;
@@ -32,14 +43,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
        case SRC_RSRC_GRP_45_RSRC_TYPE:
        case DST_RSRC_GRP_45_RSRC_TYPE:
                return version <= IPA_VERSION_3_1 ||
-                      version == IPA_VERSION_4_5;
+                      version == IPA_VERSION_4_5 ||
+                      version == IPA_VERSION_5_0;
 
        case SRC_RSRC_GRP_67_RSRC_TYPE:
        case DST_RSRC_GRP_67_RSRC_TYPE:
-               return version <= IPA_VERSION_3_1;
+               return version <= IPA_VERSION_3_1 ||
+                      version == IPA_VERSION_5_0;
 
        case ENDP_FILTER_ROUTER_HSH_CFG:
-               return version != IPA_VERSION_4_2;
+               return version < IPA_VERSION_5_0 &&
+                       version != IPA_VERSION_4_2;
 
        case IRQ_SUSPEND_EN:
        case IRQ_SUSPEND_CLR:
@@ -51,10 +65,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
        case SHARED_MEM_SIZE:
        case QSB_MAX_WRITES:
        case QSB_MAX_READS:
-       case FILT_ROUT_HASH_EN:
-       case FILT_ROUT_CACHE_CFG:
-       case FILT_ROUT_HASH_FLUSH:
-       case FILT_ROUT_CACHE_FLUSH:
        case STATE_AGGR_ACTIVE:
        case LOCAL_PKT_PROC_CNTXT:
        case AGGR_FORCE_CLOSE:
@@ -76,8 +86,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
        case ENDP_INIT_RSRC_GRP:
        case ENDP_INIT_SEQ:
        case ENDP_STATUS:
-       case ENDP_FILTER_CACHE_CFG:
-       case ENDP_ROUTER_CACHE_CFG:
        case IPA_IRQ_STTS:
        case IPA_IRQ_EN:
        case IPA_IRQ_CLR:
index 28aa1351dd4880e4f9e0ac6bb43ccb785cd83b66..7dd65d39333ddaefb0e3cff1e22421f41bed8eec 100644 (file)
@@ -60,9 +60,8 @@ enum ipa_reg_id {
        SHARED_MEM_SIZE,
        QSB_MAX_WRITES,
        QSB_MAX_READS,
-       FILT_ROUT_HASH_EN,                              /* Not IPA v5.0+ */
-       FILT_ROUT_CACHE_CFG,                            /* IPA v5.0+ */
-       FILT_ROUT_HASH_FLUSH,                           /* Not IPA v5.0+ */
+       FILT_ROUT_HASH_EN,                              /* IPA v4.2 */
+       FILT_ROUT_HASH_FLUSH,                   /* Not IPA v4.2 nor IPA v5.0+ */
        FILT_ROUT_CACHE_FLUSH,                          /* IPA v5.0+ */
        STATE_AGGR_ACTIVE,
        IPA_BCR,                                        /* Not IPA v4.5+ */
@@ -77,12 +76,12 @@ enum ipa_reg_id {
        TIMERS_PULSE_GRAN_CFG,                          /* IPA v4.5+ */
        SRC_RSRC_GRP_01_RSRC_TYPE,
        SRC_RSRC_GRP_23_RSRC_TYPE,
-       SRC_RSRC_GRP_45_RSRC_TYPE,              /* Not IPA v3.5+, IPA v4.5 */
-       SRC_RSRC_GRP_67_RSRC_TYPE,                      /* Not IPA v3.5+ */
+       SRC_RSRC_GRP_45_RSRC_TYPE,      /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+       SRC_RSRC_GRP_67_RSRC_TYPE,              /* Not IPA v3.5+; IPA v5.0 */
        DST_RSRC_GRP_01_RSRC_TYPE,
        DST_RSRC_GRP_23_RSRC_TYPE,
-       DST_RSRC_GRP_45_RSRC_TYPE,              /* Not IPA v3.5+, IPA v4.5 */
-       DST_RSRC_GRP_67_RSRC_TYPE,                      /* Not IPA v3.5+ */
+       DST_RSRC_GRP_45_RSRC_TYPE,      /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */
+       DST_RSRC_GRP_67_RSRC_TYPE,              /* Not IPA v3.5+; IPA v5.0 */
        ENDP_INIT_CTRL,         /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
        ENDP_INIT_CFG,
        ENDP_INIT_NAT,                  /* TX only */
@@ -206,14 +205,6 @@ enum ipa_reg_qsb_max_reads_field_id {
        GEN_QMB_1_MAX_READS_BEATS,                      /* IPA v4.0+ */
 };
 
-/* FILT_ROUT_CACHE_CFG register */
-enum ipa_reg_filt_rout_cache_cfg_field_id {
-       ROUTER_CACHE_EN,
-       FILTER_CACHE_EN,
-       LOW_PRI_HASH_HIT_DISABLE,
-       LRU_EVICTION_THRESHOLD,
-};
-
 /* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
 enum ipa_reg_filt_rout_hash_field_id {
        IPV6_ROUTER_HASH,
index 57b457f39b6e2b24c309d0849ce119ecfee838ea..2ee07eebca6774fc9fe60e9b2c51efb0a2f37101 100644 (file)
@@ -6,7 +6,8 @@
 #define _REG_H_
 
 #include <linux/types.h>
-#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/bug.h>
 
 /**
  * struct reg - A register descriptor
index 648b51b88d4e862335f2a9db6f9fd1e135d1895f..2900e5c3ff88833574d92cbea552c0ba57759536 100644 (file)
@@ -137,17 +137,17 @@ REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
           0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
-          0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+          0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
 
 REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
-          0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+          0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
 
 static const u32 reg_gsi_status_fmask[] = {
        [ENABLED]                                       = BIT(0),
                                                /* Bits 1-31 reserved */
 };
 
-REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
 
 static const u32 reg_ch_cmd_fmask[] = {
        [CH_CHID]                                       = GENMASK(7, 0),
@@ -155,7 +155,7 @@ static const u32 reg_ch_cmd_fmask[] = {
        [CH_OPCODE]                                     = GENMASK(31, 24),
 };
 
-REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
 
 static const u32 reg_ev_ch_cmd_fmask[] = {
        [EV_CHID]                                       = GENMASK(7, 0),
@@ -163,7 +163,7 @@ static const u32 reg_ev_ch_cmd_fmask[] = {
        [EV_OPCODE]                                     = GENMASK(31, 24),
 };
 
-REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
 
 static const u32 reg_generic_cmd_fmask[] = {
        [GENERIC_OPCODE]                                = GENMASK(4, 0),
@@ -172,7 +172,7 @@ static const u32 reg_generic_cmd_fmask[] = {
                                                /* Bits 14-31 reserved */
 };
 
-REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
 
 static const u32 reg_hw_param_2_fmask[] = {
        [IRAM_SIZE]                                     = GENMASK(2, 0),
@@ -188,58 +188,58 @@ static const u32 reg_hw_param_2_fmask[] = {
        [GSI_USE_INTER_EE]                              = BIT(31),
 };
 
-REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
 
 REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
-    0x0001f098 + 0x4000 * GSI_EE_AP);
+    0x00012098 + 0x4000 * GSI_EE_AP);
 
 REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
-    0x0001f09c + 0x4000 * GSI_EE_AP);
+    0x0001209c + 0x4000 * GSI_EE_AP);
 
 REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
-    0x0001f0a0 + 0x4000 * GSI_EE_AP);
+    0x000120a0 + 0x4000 * GSI_EE_AP);
 
 REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
-    0x0001f0a4 + 0x4000 * GSI_EE_AP);
+    0x000120a4 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
 
 REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
-    0x0001f0b8 + 0x4000 * GSI_EE_AP);
+    0x000120b8 + 0x4000 * GSI_EE_AP);
 
 REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
-    0x0001f0c0 + 0x4000 * GSI_EE_AP);
+    0x000120c0 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
 
-REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
 
 static const u32 reg_cntxt_intset_fmask[] = {
        [INTYPE]                                        = BIT(0)
                                                /* Bits 1-31 reserved */
 };
 
-REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
 
-REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
 
-REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
 
 static const u32 reg_cntxt_scratch_0_fmask[] = {
        [INTER_EE_RESULT]                               = GENMASK(2, 0),
@@ -248,7 +248,7 @@ static const u32 reg_cntxt_scratch_0_fmask[] = {
                                                /* Bits 8-31 reserved */
 };
 
-REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
 
 static const struct reg *reg_array[] = {
        [INTER_EE_SRC_CH_IRQ_MSK]       = &reg_inter_ee_src_ch_irq_msk,
index 4bf45d264d6b914133bf13148fb3a154727be7f5..8b5d95425a766c97f97f3d2646fe1bc104d7412b 100644 (file)
@@ -27,7 +27,7 @@ static const u32 reg_ch_c_cntxt_0_fmask[] = {
 };
 
 REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
-                 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+                 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
 
 static const u32 reg_ch_c_cntxt_1_fmask[] = {
        [CH_R_LENGTH]                                   = GENMASK(19, 0),
@@ -35,11 +35,11 @@ static const u32 reg_ch_c_cntxt_1_fmask[] = {
 };
 
 REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
-                 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+                 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
 
-REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
 
-REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
 
 static const u32 reg_ch_c_qos_fmask[] = {
        [WRR_WEIGHT]                                    = GENMASK(3, 0),
@@ -53,7 +53,7 @@ static const u32 reg_ch_c_qos_fmask[] = {
                                                /* Bits 25-31 reserved */
 };
 
-REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
 
 static const u32 reg_error_log_fmask[] = {
        [ERR_ARG3]                                      = GENMASK(3, 0),
@@ -67,16 +67,16 @@ static const u32 reg_error_log_fmask[] = {
 };
 
 REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
-          0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+          0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
-          0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+          0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
-          0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+          0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
-          0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+          0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
 
 static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
        [EV_CHTYPE]                                     = GENMASK(3, 0),
@@ -89,23 +89,23 @@ static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
 };
 
 REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
-                 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+                 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
 
 static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
        [R_LENGTH]                                      = GENMASK(15, 0),
 };
 
 REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
-                 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+                 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
-          0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+          0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
-          0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+          0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
-          0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+          0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
 
 static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
        [EV_MODT]                                       = GENMASK(15, 0),
@@ -114,28 +114,28 @@ static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
 };
 
 REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
-                 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+                 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
-          0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+          0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
-          0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+          0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
-          0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+          0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
-          0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+          0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
-          0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+          0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
-          0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+          0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
-          0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+          0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
 
 REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
           0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
index 943d26cbf39f5d8d14ed247f4918d51b5104f31f..71712ea25403dd71322f36297bb54f782d789f2c 100644 (file)
@@ -101,6 +101,7 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
                goto out;
 
        skb->dev = addr->master->dev;
+       skb->skb_iif = skb->dev->ifindex;
        len = skb->len + ETH_HLEN;
        ipvlan_count_rx(addr->master, len, true, false);
 out:
index d77c987fda9cd1270022a657b37aa61ec870b249..4630dde019749a7a4de81c27b9f6af902bad1c2f 100644 (file)
@@ -18,16 +18,18 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
 MODULE_LICENSE("GPL");
 
 /**
- * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+ * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
  * @mdio: pointer to mii_bus structure
  * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
+ * @owner: module owning this @mdio object.
  * an ACPI device object corresponding to the MDIO bus and its children are
  * expected to correspond to the PHY devices on that bus.
  *
  * This function registers the mii_bus structure and registers a phy_device
  * for each child node of @fwnode.
  */
-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+                           struct module *owner)
 {
        struct fwnode_handle *child;
        u32 addr;
@@ -35,7 +37,7 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
 
        /* Mask out all PHYs from auto probing. */
        mdio->phy_mask = GENMASK(31, 0);
-       ret = mdiobus_register(mdio);
+       ret = __mdiobus_register(mdio, owner);
        if (ret)
                return ret;
 
@@ -55,4 +57,4 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
        }
        return 0;
 }
-EXPORT_SYMBOL(acpi_mdiobus_register);
+EXPORT_SYMBOL(__acpi_mdiobus_register);
index 3847ee92c1096c1569442acea2120c279ad2dc60..6067d96b2b7bf030a09a67bfad7490fea60d219f 100644 (file)
@@ -106,6 +106,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
                if (i >= ARRAY_SIZE(nexus->buses))
                        break;
        }
+       fwnode_handle_put(fwn);
        return 0;
 
 err_release_regions:
index 510822d6d0d90ce4fe012388e71733846c57c115..1e46e39f5f46a0857570682a86e23d876ef77704 100644 (file)
@@ -139,21 +139,23 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
 EXPORT_SYMBOL(of_mdiobus_child_is_phy);
 
 /**
- * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+ * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
  * @mdio: pointer to mii_bus structure
  * @np: pointer to device_node of MDIO bus.
+ * @owner: module owning the @mdio object.
  *
  * This function registers the mii_bus structure and registers a phy_device
  * for each child node of @np.
  */
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+                         struct module *owner)
 {
        struct device_node *child;
        bool scanphys = false;
        int addr, rc;
 
        if (!np)
-               return mdiobus_register(mdio);
+               return __mdiobus_register(mdio, owner);
 
        /* Do not continue if the node is disabled */
        if (!of_device_is_available(np))
@@ -172,7 +174,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
 
        /* Register the MDIO bus */
-       rc = mdiobus_register(mdio);
+       rc = __mdiobus_register(mdio, owner);
        if (rc)
                return rc;
 
@@ -236,7 +238,7 @@ unregister:
        mdiobus_unregister(mdio);
        return rc;
 }
-EXPORT_SYMBOL(of_mdiobus_register);
+EXPORT_SYMBOL(__of_mdiobus_register);
 
 /**
  * of_mdio_find_device - Given a device tree node, find the mdio_device
index b560e99695dfd3f4ca3db7a8f9080a1629a26408..69b829e6ab35b84a07f0063f3a6f7b48ea1a6de1 100644 (file)
@@ -98,13 +98,14 @@ EXPORT_SYMBOL(__devm_mdiobus_register);
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
 /**
- * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
+ * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
  * @dev:       Device to register mii_bus for
  * @mdio:      MII bus structure to register
  * @np:                Device node to parse
+ * @owner:     Owning module
  */
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
-                            struct device_node *np)
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+                              struct device_node *np, struct module *owner)
 {
        struct mdiobus_devres *dr;
        int ret;
@@ -117,7 +118,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
        if (!dr)
                return -ENOMEM;
 
-       ret = of_mdiobus_register(mdio, np);
+       ret = __of_mdiobus_register(mdio, np, owner);
        if (ret) {
                devres_free(dr);
                return ret;
@@ -127,7 +128,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
        devres_add(dev, dr);
        return 0;
 }
-EXPORT_SYMBOL(devm_of_mdiobus_register);
+EXPORT_SYMBOL(__devm_of_mdiobus_register);
 #endif /* CONFIG_OF_MDIO */
 
 MODULE_LICENSE("GPL");
index 8a13b1ad9a330b599742fc40a5d540ec3dcc0c3f..62bf99e45af16d86ff80428af812e34183888715 100644 (file)
@@ -280,12 +280,9 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
        u16 pwd[3] = {0, 0, 0};
        struct ethtool_wolinfo *wol_conf = wol;
 
-       mutex_lock(&phydev->lock);
        rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
-       if (rc < 0) {
-               rc = phy_restore_page(phydev, rc, rc);
-               goto out_unlock;
-       }
+       if (rc < 0)
+               return phy_restore_page(phydev, rc, rc);
 
        if (wol->wolopts & WAKE_MAGIC) {
                /* Store the device address for the magic packet */
@@ -323,7 +320,7 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
 
        rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
        if (rc < 0)
-               goto out_unlock;
+               return rc;
 
        if (wol->wolopts & WAKE_MAGIC) {
                /* Enable the WOL interrupt */
@@ -331,22 +328,19 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
                reg_val |= MII_VSC85XX_INT_MASK_WOL;
                rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
                if (rc)
-                       goto out_unlock;
+                       return rc;
        } else {
                /* Disable the WOL interrupt */
                reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
                reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
                rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
                if (rc)
-                       goto out_unlock;
+                       return rc;
        }
        /* Clear WOL iterrupt status */
        reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS);
 
-out_unlock:
-       mutex_unlock(&phydev->lock);
-
-       return rc;
+       return 0;
 }
 
 static void vsc85xx_wol_get(struct phy_device *phydev,
@@ -358,10 +352,9 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
        u16 pwd[3] = {0, 0, 0};
        struct ethtool_wolinfo *wol_conf = wol;
 
-       mutex_lock(&phydev->lock);
        rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
        if (rc < 0)
-               goto out_unlock;
+               goto out_restore_page;
 
        reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
        if (reg_val & SECURE_ON_ENABLE)
@@ -377,9 +370,8 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
                }
        }
 
-out_unlock:
+out_restore_page:
        phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
-       mutex_unlock(&phydev->lock);
 }
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
index 047c581457e34510236133a6ff4ff292a6c06385..5813b07242ce16486aea67f42cd86ffbc49d7cf9 100644 (file)
@@ -79,7 +79,7 @@
 #define SGMII_ABILITY                  BIT(0)
 
 #define VEND1_MII_BASIC_CONFIG         0xAFC6
-#define MII_BASIC_CONFIG_REV           BIT(8)
+#define MII_BASIC_CONFIG_REV           BIT(4)
 #define MII_BASIC_CONFIG_SGMII         0x9
 #define MII_BASIC_CONFIG_RGMII         0x7
 #define MII_BASIC_CONFIG_RMII          0x5
index b33e55a7364e6e697088cf97de5c110c2a2c20af..99a07eb54c441cec6c4ea7e24b3e89ec3618f516 100644 (file)
@@ -57,6 +57,18 @@ static const char *phy_state_to_str(enum phy_state st)
        return NULL;
 }
 
+static void phy_process_state_change(struct phy_device *phydev,
+                                    enum phy_state old_state)
+{
+       if (old_state != phydev->state) {
+               phydev_dbg(phydev, "PHY state change %s -> %s\n",
+                          phy_state_to_str(old_state),
+                          phy_state_to_str(phydev->state));
+               if (phydev->drv && phydev->drv->link_change_notify)
+                       phydev->drv->link_change_notify(phydev);
+       }
+}
+
 static void phy_link_up(struct phy_device *phydev)
 {
        phydev->phy_link_change(phydev, true);
@@ -1301,6 +1313,7 @@ EXPORT_SYMBOL(phy_free_interrupt);
 void phy_stop(struct phy_device *phydev)
 {
        struct net_device *dev = phydev->attached_dev;
+       enum phy_state old_state;
 
        if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
                WARN(1, "called from state %s\n",
@@ -1309,6 +1322,7 @@ void phy_stop(struct phy_device *phydev)
        }
 
        mutex_lock(&phydev->lock);
+       old_state = phydev->state;
 
        if (phydev->state == PHY_CABLETEST) {
                phy_abort_cable_test(phydev);
@@ -1319,6 +1333,7 @@ void phy_stop(struct phy_device *phydev)
                sfp_upstream_stop(phydev->sfp_bus);
 
        phydev->state = PHY_HALTED;
+       phy_process_state_change(phydev, old_state);
 
        mutex_unlock(&phydev->lock);
 
@@ -1436,13 +1451,7 @@ void phy_state_machine(struct work_struct *work)
        if (err < 0)
                phy_error(phydev);
 
-       if (old_state != phydev->state) {
-               phydev_dbg(phydev, "PHY state change %s -> %s\n",
-                          phy_state_to_str(old_state),
-                          phy_state_to_str(phydev->state));
-               if (phydev->drv && phydev->drv->link_change_notify)
-                       phydev->drv->link_change_notify(phydev);
-       }
+       phy_process_state_change(phydev, old_state);
 
        /* Only re-schedule a PHY state machine change if we are polling the
         * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
index c02cad6478a816f3a9c9818fd2af9ff3eec783e7..fb98db61e06cb56e6f0ca78f63e5dbea00f3cd58 100644 (file)
@@ -2190,6 +2190,11 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
                        break;
                }
 
+               /* Force a poll to re-read the hardware signal state after
+                * sfp_sm_mod_probe() changed state_hw_mask.
+                */
+               mod_delayed_work(system_wq, &sfp->poll, 1);
+
                err = sfp_hwmon_insert(sfp);
                if (err)
                        dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
index 00d9eff91dcfaf34c4fae700e6d7dbcbc97f22cd..df2c5435c5c49ee828d625c36e39f4e2d2af0216 100644 (file)
@@ -199,8 +199,11 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
 static int lan87xx_read_status(struct phy_device *phydev)
 {
        struct smsc_phy_priv *priv = phydev->priv;
+       int err;
 
-       int err = genphy_read_status(phydev);
+       err = genphy_read_status(phydev);
+       if (err)
+               return err;
 
        if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
                /* Disable EDPD to wake up PHY */
index 743cbf5d662c99c9d5ea390478d9516c2f4a871d..f7cff58fe0449313748237d53ff69191b85e5e5e 100644 (file)
@@ -666,8 +666,9 @@ static int asix_resume(struct usb_interface *intf)
 static int ax88772_init_mdio(struct usbnet *dev)
 {
        struct asix_common_private *priv = dev->driver_priv;
+       int ret;
 
-       priv->mdio = devm_mdiobus_alloc(&dev->udev->dev);
+       priv->mdio = mdiobus_alloc();
        if (!priv->mdio)
                return -ENOMEM;
 
@@ -679,7 +680,20 @@ static int ax88772_init_mdio(struct usbnet *dev)
        snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
                 dev->udev->bus->busnum, dev->udev->devnum);
 
-       return devm_mdiobus_register(&dev->udev->dev, priv->mdio);
+       ret = mdiobus_register(priv->mdio);
+       if (ret) {
+               netdev_err(dev->net, "Could not register MDIO bus (err %d)\n", ret);
+               mdiobus_free(priv->mdio);
+               priv->mdio = NULL;
+       }
+
+       return ret;
+}
+
+static void ax88772_mdio_unregister(struct asix_common_private *priv)
+{
+       mdiobus_unregister(priv->mdio);
+       mdiobus_free(priv->mdio);
 }
 
 static int ax88772_init_phy(struct usbnet *dev)
@@ -896,16 +910,23 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 
        ret = ax88772_init_mdio(dev);
        if (ret)
-               return ret;
+               goto mdio_err;
 
        ret = ax88772_phylink_setup(dev);
        if (ret)
-               return ret;
+               goto phylink_err;
 
        ret = ax88772_init_phy(dev);
        if (ret)
-               phylink_destroy(priv->phylink);
+               goto initphy_err;
 
+       return 0;
+
+initphy_err:
+       phylink_destroy(priv->phylink);
+phylink_err:
+       ax88772_mdio_unregister(priv);
+mdio_err:
        return ret;
 }
 
@@ -926,6 +947,7 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
        phylink_disconnect_phy(priv->phylink);
        rtnl_unlock();
        phylink_destroy(priv->phylink);
+       ax88772_mdio_unregister(priv);
        asix_rx_fixup_common_free(dev->driver_priv);
 }
 
index 068488890d57be2f4a1c31ae72ecef518775f110..c458c030fadf6cbaa7a55a4e35344bf2a01122a4 100644 (file)
@@ -3579,13 +3579,29 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
                size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
                align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
 
+               if (unlikely(size > skb->len)) {
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "size err rx_cmd_a=0x%08x\n",
+                                 rx_cmd_a);
+                       return 0;
+               }
+
                if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
                        netif_dbg(dev, rx_err, dev->net,
                                  "Error rx_cmd_a=0x%08x", rx_cmd_a);
                } else {
-                       u32 frame_len = size - ETH_FCS_LEN;
+                       u32 frame_len;
                        struct sk_buff *skb2;
 
+                       if (unlikely(size < ETH_FCS_LEN)) {
+                               netif_dbg(dev, rx_err, dev->net,
+                                         "size err rx_cmd_a=0x%08x\n",
+                                         rx_cmd_a);
+                               return 0;
+                       }
+
+                       frame_len = size - ETH_FCS_LEN;
+
                        skb2 = napi_alloc_skb(&dev->napi, frame_len);
                        if (!skb2)
                                return 0;
index 7a2b0094de51f53dd2161fce0ff04193a1f85654..2894114858a295aaea4f04666db198cff5e6b3cb 100644 (file)
@@ -61,12 +61,6 @@ pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
                                val, index, NULL, 0);
 }
 
-static inline int
-pl_clear_QuickLink_features(struct usbnet *dev, int val)
-{
-       return pl_vendor_req(dev, 1, (u8) val, 0);
-}
-
 static inline int
 pl_set_QuickLink_features(struct usbnet *dev, int val)
 {
index 95de452ff4dad58f05e571b1e569a4d46a9129e2..5d6454fedb3f17707f7cd8a3b963da08e67c76c9 100644 (file)
@@ -2200,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
                align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
 
+               if (unlikely(size > skb->len)) {
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "size err rx_cmd_a=0x%08x\n",
+                                 rx_cmd_a);
+                       return 0;
+               }
+
                if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
                        netif_dbg(dev, rx_err, dev->net,
                                  "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
index 32d2c60d334dc75abb495cca3b752b2aa64aad9a..563ecd27b93ea56441daf150aecfa2f51219e6cc 100644 (file)
@@ -1833,6 +1833,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                size = (u16)((header & RX_STS_FL_) >> 16);
                align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
 
+               if (unlikely(size > skb->len)) {
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "size err header=0x%08x\n", header);
+                       return 0;
+               }
+
                if (unlikely(header & RX_STS_ES_)) {
                        netif_dbg(dev, rx_err, dev->net,
                                  "Error header=0x%08x\n", header);
index 1bb54de7124d95e4974c158017ac6611a370c4d8..c1178915496d80da7e8c63c91bf395444ce3194f 100644 (file)
@@ -708,7 +708,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
        u32 frame_sz;
 
        if (skb_shared(skb) || skb_head_is_locked(skb) ||
-           skb_shinfo(skb)->nr_frags) {
+           skb_shinfo(skb)->nr_frags ||
+           skb_headroom(skb) < XDP_PACKET_HEADROOM) {
                u32 size, len, max_head_size, off;
                struct sk_buff *nskb;
                struct page *page;
@@ -773,9 +774,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 
                consume_skb(skb);
                skb = nskb;
-       } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
-                  pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
-               goto drop;
        }
 
        /* SKB "head" area always have tailroom for skb_shared_info */
@@ -1257,6 +1255,26 @@ static int veth_enable_range_safe(struct net_device *dev, int start, int end)
        return 0;
 }
 
+static void veth_set_xdp_features(struct net_device *dev)
+{
+       struct veth_priv *priv = netdev_priv(dev);
+       struct net_device *peer;
+
+       peer = rtnl_dereference(priv->peer);
+       if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
+               xdp_features_t val = NETDEV_XDP_ACT_BASIC |
+                                    NETDEV_XDP_ACT_REDIRECT |
+                                    NETDEV_XDP_ACT_RX_SG;
+
+               if (priv->_xdp_prog || veth_gro_requested(dev))
+                       val |= NETDEV_XDP_ACT_NDO_XMIT |
+                              NETDEV_XDP_ACT_NDO_XMIT_SG;
+               xdp_set_features_flag(dev, val);
+       } else {
+               xdp_clear_features_flag(dev);
+       }
+}
+
 static int veth_set_channels(struct net_device *dev,
                             struct ethtool_channels *ch)
 {
@@ -1323,6 +1341,12 @@ out:
                if (peer)
                        netif_carrier_on(peer);
        }
+
+       /* update XDP supported features */
+       veth_set_xdp_features(dev);
+       if (peer)
+               veth_set_xdp_features(peer);
+
        return err;
 
 revert:
@@ -1489,7 +1513,10 @@ static int veth_set_features(struct net_device *dev,
                err = veth_napi_enable(dev);
                if (err)
                        return err;
+
+               xdp_features_set_redirect_target(dev, true);
        } else {
+               xdp_features_clear_redirect_target(dev);
                veth_napi_del(dev);
        }
        return 0;
@@ -1570,10 +1597,15 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                        peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                        peer->max_mtu = max_mtu;
                }
+
+               xdp_features_set_redirect_target(dev, true);
        }
 
        if (old_prog) {
                if (!prog) {
+                       if (!veth_gro_requested(dev))
+                               xdp_features_clear_redirect_target(dev);
+
                        if (dev->flags & IFF_UP)
                                veth_disable_xdp(dev);
 
@@ -1610,7 +1642,7 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
        struct veth_xdp_buff *_ctx = (void *)ctx;
 
        if (!_ctx->skb)
-               return -EOPNOTSUPP;
+               return -ENODATA;
 
        *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
        return 0;
@@ -1621,7 +1653,7 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
        struct veth_xdp_buff *_ctx = (void *)ctx;
 
        if (!_ctx->skb)
-               return -EOPNOTSUPP;
+               return -ENODATA;
 
        *hash = skb_get_hash(_ctx->skb);
        return 0;
@@ -1686,10 +1718,6 @@ static void veth_setup(struct net_device *dev)
        dev->hw_enc_features = VETH_FEATURES;
        dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
        netif_set_tso_max_size(dev, GSO_MAX_SIZE);
-
-       dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
-                           NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
-                           NETDEV_XDP_ACT_NDO_XMIT_SG;
 }
 
 /*
@@ -1857,6 +1885,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
                goto err_queues;
 
        veth_disable_gro(dev);
+       /* update XDP supported features */
+       veth_set_xdp_features(dev);
+       veth_set_xdp_features(peer);
+
        return 0;
 
 err_queues:
index fb5e68ed3ec27c1895f96108e35ee7b4832595b6..2396c28c01221956c12d6b1611efc020ac76547d 100644 (file)
@@ -446,7 +446,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
-                                  unsigned int len, unsigned int truesize)
+                                  unsigned int len, unsigned int truesize,
+                                  unsigned int headroom)
 {
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -464,11 +465,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
-       buf = p;
+       buf = p - headroom;
        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;
-       tailroom = truesize - hdr_padded_len - len;
+       tailroom = truesize - headroom  - hdr_padded_len - len;
 
        shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
@@ -545,6 +546,87 @@ ok:
        return skb;
 }
 
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+{
+       unsigned int len;
+       unsigned int packets = 0;
+       unsigned int bytes = 0;
+       void *ptr;
+
+       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+               if (likely(!is_xdp_frame(ptr))) {
+                       struct sk_buff *skb = ptr;
+
+                       pr_debug("Sent skb %p\n", skb);
+
+                       bytes += skb->len;
+                       napi_consume_skb(skb, in_napi);
+               } else {
+                       struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+                       bytes += xdp_get_frame_len(frame);
+                       xdp_return_frame(frame);
+               }
+               packets++;
+       }
+
+       /* Avoid overhead when no packets have been processed
+        * happens when called speculatively from start_xmit.
+        */
+       if (!packets)
+               return;
+
+       u64_stats_update_begin(&sq->stats.syncp);
+       sq->stats.bytes += bytes;
+       sq->stats.packets += packets;
+       u64_stats_update_end(&sq->stats.syncp);
+}
+
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+               return false;
+       else if (q < vi->curr_queue_pairs)
+               return true;
+       else
+               return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+                                     struct net_device *dev,
+                                     struct send_queue *sq)
+{
+       bool use_napi = sq->napi.weight;
+       int qnum;
+
+       qnum = sq - vi->sq;
+
+       /* If running out of space, stop queue to avoid getting packets that we
+        * are then unable to transmit.
+        * An alternative would be to force queuing layer to requeue the skb by
+        * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+        * returned in a normal path of operation: it means that driver is not
+        * maintaining the TX queue stop/start state properly, and causes
+        * the stack to do a non-trivial amount of useless work.
+        * Since most packets only take 1 or 2 ring slots, stopping the queue
+        * early means 16 slots are typically wasted.
+        */
+       if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+               netif_stop_subqueue(dev, qnum);
+               if (use_napi) {
+                       if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+                               virtqueue_napi_schedule(&sq->napi, sq->vq);
+               } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+                       /* More just got used, free them then recheck. */
+                       free_old_xmit_skbs(sq, false);
+                       if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+                               netif_start_subqueue(dev, qnum);
+                               virtqueue_disable_cb(sq->vq);
+                       }
+               }
+       }
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
                                   struct send_queue *sq,
                                   struct xdp_frame *xdpf)
@@ -686,6 +768,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        }
        ret = nxmit;
 
+       if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+               check_sq_full_and_disable(vi, dev, sq);
+
        if (flags & XDP_XMIT_FLUSH) {
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
                        kicks = 1;
@@ -925,7 +1010,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
 {
        struct page *page = buf;
        struct sk_buff *skb =
-               page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+               page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
 
        stats->bytes += len - vi->hdr_len;
        if (unlikely(!skb))
@@ -1188,9 +1273,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
                switch (act) {
                case XDP_PASS:
+                       head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
+                       if (unlikely(!head_skb))
+                               goto err_xdp_frags;
+
                        if (unlikely(xdp_page != page))
                                put_page(page);
-                       head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
                        rcu_read_unlock();
                        return head_skb;
                case XDP_TX:
@@ -1248,7 +1336,7 @@ err_xdp_frags:
        rcu_read_unlock();
 
 skip_xdp:
-       head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+       head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
        curr_skb = head_skb;
 
        if (unlikely(!curr_skb))
@@ -1714,52 +1802,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
        return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
-{
-       unsigned int len;
-       unsigned int packets = 0;
-       unsigned int bytes = 0;
-       void *ptr;
-
-       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-               if (likely(!is_xdp_frame(ptr))) {
-                       struct sk_buff *skb = ptr;
-
-                       pr_debug("Sent skb %p\n", skb);
-
-                       bytes += skb->len;
-                       napi_consume_skb(skb, in_napi);
-               } else {
-                       struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-                       bytes += xdp_get_frame_len(frame);
-                       xdp_return_frame(frame);
-               }
-               packets++;
-       }
-
-       /* Avoid overhead when no packets have been processed
-        * happens when called speculatively from start_xmit.
-        */
-       if (!packets)
-               return;
-
-       u64_stats_update_begin(&sq->stats.syncp);
-       sq->stats.bytes += bytes;
-       sq->stats.packets += packets;
-       u64_stats_update_end(&sq->stats.syncp);
-}
-
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-               return false;
-       else if (q < vi->curr_queue_pairs)
-               return true;
-       else
-               return false;
-}
-
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1989,30 +2031,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                nf_reset_ct(skb);
        }
 
-       /* If running out of space, stop queue to avoid getting packets that we
-        * are then unable to transmit.
-        * An alternative would be to force queuing layer to requeue the skb by
-        * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
-        * returned in a normal path of operation: it means that driver is not
-        * maintaining the TX queue stop/start state properly, and causes
-        * the stack to do a non-trivial amount of useless work.
-        * Since most packets only take 1 or 2 ring slots, stopping the queue
-        * early means 16 slots are typically wasted.
-        */
-       if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
-               netif_stop_subqueue(dev, qnum);
-               if (use_napi) {
-                       if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
-                               virtqueue_napi_schedule(&sq->napi, sq->vq);
-               } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
-                       /* More just got used, free them then recheck. */
-                       free_old_xmit_skbs(sq, false);
-                       if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
-                               netif_start_subqueue(dev, qnum);
-                               virtqueue_disable_cb(sq->vq);
-                       }
-               }
-       }
+       check_sq_full_and_disable(vi, dev, sq);
 
        if (kick || netif_xmit_stopped(txq)) {
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
index 1c53b5546927052a336bcefabd6876c999f6f1c2..47c2ad7a3e429ae03fc732024728b113fe77d87e 100644 (file)
@@ -1177,14 +1177,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
        uhdlc_priv->dev = &pdev->dev;
        uhdlc_priv->ut_info = ut_info;
 
-       if (of_get_property(np, "fsl,tdm-interface", NULL))
-               uhdlc_priv->tsa = 1;
-
-       if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
-               uhdlc_priv->loopback = 1;
-
-       if (of_get_property(np, "fsl,hdlc-bus", NULL))
-               uhdlc_priv->hdlc_bus = 1;
+       uhdlc_priv->tsa = of_property_read_bool(np, "fsl,tdm-interface");
+       uhdlc_priv->loopback = of_property_read_bool(np, "fsl,ucc-internal-loopback");
+       uhdlc_priv->hdlc_bus = of_property_read_bool(np, "fsl,hdlc-bus");
 
        if (uhdlc_priv->tsa == 1) {
                utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
index 565522466eba5465f15be91a72b370f40afae71e..b55b1b17f4d197cf32775699087702bae2396bff 100644 (file)
@@ -732,7 +732,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 
        rcu_read_lock();
        do {
-               while (likely(!mvmtxq->stopped &&
+               while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
+                                       &mvmtxq->state) &&
+                             !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
+                                       &mvmtxq->state) &&
                              !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
                        skb = ieee80211_tx_dequeue(hw, txq);
 
@@ -757,42 +760,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
 
-       /*
-        * Please note that racing is handled very carefully here:
-        * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-        * deleted afterwards.
-        * This means that if:
-        * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-        *      queue is allocated and we can TX.
-        * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-        *      a race, should defer the frame.
-        * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-        *      need to allocate the queue and defer the frame.
-        * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-        *      queue is already scheduled for allocation, no need to allocate,
-        *      should defer the frame.
-        */
-
-       /* If the queue is allocated TX and return. */
-       if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-               /*
-                * Check that list is empty to avoid a race where txq_id is
-                * already updated, but the queue allocation work wasn't
-                * finished
-                */
-               if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-                       return;
-
+       if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+           !txq->sta) {
                iwl_mvm_mac_itxq_xmit(hw, txq);
                return;
        }
 
-       /* The list is being deleted only after the queue is fully allocated. */
-       if (!list_empty(&mvmtxq->list))
-               return;
+       /* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+        * to handle any packets we leave on the txq now
+        */
 
-       list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-       schedule_work(&mvm->add_stream_wk);
+       spin_lock_bh(&mvm->add_stream_lock);
+       /* The list is being deleted only after the queue is fully allocated. */
+       if (list_empty(&mvmtxq->list) &&
+           /* recheck under lock */
+           !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+               list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+               schedule_work(&mvm->add_stream_wk);
+       }
+       spin_unlock_bh(&mvm->add_stream_lock);
 }
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)          \
index 90bc95d96a78e8894a5b5bc6f0eba4d8ac9037da..f307c345dfa0b4b9b815daa717f51751d17cdf0d 100644 (file)
@@ -729,7 +729,10 @@ struct iwl_mvm_txq {
        struct list_head list;
        u16 txq_id;
        atomic_t tx_request;
-       bool stopped;
+#define IWL_MVM_TXQ_STATE_STOP_FULL    0
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT        1
+#define IWL_MVM_TXQ_STATE_READY                2
+       unsigned long state;
 };
 
 static inline struct iwl_mvm_txq *
@@ -827,6 +830,7 @@ struct iwl_mvm {
                struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
        };
        struct work_struct add_stream_wk; /* To add streams to queues */
+       spinlock_t add_stream_lock;
 
        const char *nvm_file_name;
        struct iwl_nvm_data *nvm_data;
index f4e9446d9dc2d959b52fa6ab84ae12573f050141..9711841bb4564d33b8edaab378ced12db0a295b6 100644 (file)
@@ -1195,6 +1195,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
        INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
        INIT_LIST_HEAD(&mvm->add_stream_txqs);
+       spin_lock_init(&mvm->add_stream_lock);
 
        init_waitqueue_head(&mvm->rx_sync_waitq);
 
@@ -1691,7 +1692,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
 
                txq = sta->txq[tid];
                mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-               mvmtxq->stopped = !start;
+               if (start)
+                       clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+               else
+                       set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
 
                if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
                        iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
index 69634fb82a9bf104530e04eac0d3e6f44dbff9c8..9caae77995ca933cdce204fac7663c51213f9389 100644 (file)
@@ -384,8 +384,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_tid(sta, tid);
 
-               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 
        /* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -479,8 +482,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
                        disable_agg_tids |= BIT(tid);
                mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
 
-               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_lock_bh(&mvm->add_stream_lock);
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 
        mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -693,7 +699,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
        /* Stop the queue and wait for it to empty */
-       txq->stopped = true;
+       set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
 
        ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
        if (ret) {
@@ -736,7 +742,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
 
 out:
        /* Continue using the queue */
-       txq->stopped = false;
+       clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
 
        return ret;
 }
@@ -1444,12 +1450,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
                 * a queue in the function itself.
                 */
                if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+                       spin_lock_bh(&mvm->add_stream_lock);
                        list_del_init(&mvmtxq->list);
+                       spin_unlock_bh(&mvm->add_stream_lock);
                        continue;
                }
 
-               list_del_init(&mvmtxq->list);
+               /* now we're ready, any remaining races/concurrency will be
+                * handled in iwl_mvm_mac_itxq_xmit()
+                */
+               set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
                local_bh_disable();
+               spin_lock(&mvm->add_stream_lock);
+               list_del_init(&mvmtxq->list);
+               spin_unlock(&mvm->add_stream_lock);
+
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
                local_bh_enable();
        }
@@ -1864,8 +1880,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                struct iwl_mvm_txq *mvmtxq =
                        iwl_mvm_txq_from_mac80211(sta->txq[i]);
 
+               spin_lock_bh(&mvm->add_stream_lock);
                mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
                list_del_init(&mvmtxq->list);
+               clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+               spin_unlock_bh(&mvm->add_stream_lock);
        }
 }
 
index 5dcf61761a165ae0d1b9c19cd84f4e740c47a09a..9a698a16a8f38e8cde08ff078b3021cb042224db 100644 (file)
@@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
        .can_ext_scan = true,
 };
 
-static const struct of_device_id mwifiex_pcie_of_match_table[] = {
+static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
        { .compatible = "pci11ab,2b42" },
        { .compatible = "pci1b4b,2b42" },
        { }
index c64e24c10ea659908cd697321ac6adc996ddf835..a24bd40dd41ab88cdc7bd3ddcee7927df796e0fc 100644 (file)
@@ -495,7 +495,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
        {"EXTLAST", NULL, 0, 0xFE},
 };
 
-static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
        { .compatible = "marvell,sd8787" },
        { .compatible = "marvell,sd8897" },
        { .compatible = "marvell,sd8978" },
index b117e4467c87033a130b9a9bd44c14eb1b5fb015..34abf70f44aff8cf45f0e8d0d706643d3a10a06c 100644 (file)
@@ -539,6 +539,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
        if (ret)
                return ret;
 
+       set_bit(MT76_STATE_REGISTERED, &phy->state);
        phy->dev->phys[phy->band_idx] = phy;
 
        return 0;
@@ -549,6 +550,9 @@ void mt76_unregister_phy(struct mt76_phy *phy)
 {
        struct mt76_dev *dev = phy->dev;
 
+       if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
+               return;
+
        if (IS_ENABLED(CONFIG_MT76_LEDS))
                mt76_led_cleanup(phy);
        mt76_tx_status_check(dev, true);
@@ -719,6 +723,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
                return ret;
 
        WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+       set_bit(MT76_STATE_REGISTERED, &phy->state);
        sched_set_fifo_low(dev->tx_worker.task);
 
        return 0;
@@ -729,6 +734,9 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
        struct ieee80211_hw *hw = dev->hw;
 
+       if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
+               return;
+
        if (IS_ENABLED(CONFIG_MT76_LEDS))
                mt76_led_cleanup(&dev->phy);
        mt76_tx_status_check(dev, true);
index ccca0162c8f82cd3983a43dffad4cef8daff4952..183b0fc5a2d48c61d7cd5f554cc4b95a5cd74b74 100644 (file)
@@ -402,6 +402,7 @@ struct mt76_tx_cb {
 
 enum {
        MT76_STATE_INITIALIZED,
+       MT76_STATE_REGISTERED,
        MT76_STATE_RUNNING,
        MT76_STATE_MCU_RUNNING,
        MT76_SCANNING,
index efb9bfaa187f295441f01ddf67ddcdf6eb314b62..008ece1b16f8ebc9b5c122c82723847aaac79ba7 100644 (file)
@@ -1221,6 +1221,9 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
 
 int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
 {
+       if (!mt76_is_mmio(dev))
+               return 0;
+
        if (!mtk_wed_device_active(&dev->mmio.wed))
                return 0;
 
index 1ab768feccaac0f9511351d090e131d12e9d83b3..5e288116b1b010805269076eceaf8a7f82a2b4e9 100644 (file)
@@ -383,7 +383,6 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
        ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
        ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
        ieee80211_hw_set(hw, WANT_MONITOR_VIF);
-       ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 
        hw->max_tx_fragments = 4;
 
@@ -396,6 +395,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
        }
 
        if (phy->mt76->cap.has_5ghz) {
+               struct ieee80211_sta_vht_cap *vht_cap;
+
+               vht_cap = &phy->mt76->sband_5g.sband.vht_cap;
                phy->mt76->sband_5g.sband.ht_cap.cap |=
                        IEEE80211_HT_CAP_LDPC_CODING |
                        IEEE80211_HT_CAP_MAX_AMSDU;
@@ -403,19 +405,28 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
                        IEEE80211_HT_MPDU_DENSITY_4;
 
                if (is_mt7915(&dev->mt76)) {
-                       phy->mt76->sband_5g.sband.vht_cap.cap |=
+                       vht_cap->cap |=
                                IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
                                IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+                       if (!dev->dbdc_support)
+                               vht_cap->cap |=
+                                       IEEE80211_VHT_CAP_SHORT_GI_160 |
+                                       IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+                                       FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1);
                } else {
-                       phy->mt76->sband_5g.sband.vht_cap.cap |=
+                       vht_cap->cap |=
                                IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
                                IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
 
                        /* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
-                       phy->mt76->sband_5g.sband.vht_cap.cap |=
+                       vht_cap->cap |=
                                IEEE80211_VHT_CAP_SHORT_GI_160 |
                                IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
                }
+
+               if (!is_mt7915(&dev->mt76) || !dev->dbdc_support)
+                       ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
        }
 
        mt76_set_stream_caps(phy->mt76, true);
@@ -841,9 +852,13 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
        int sts = hweight8(phy->mt76->chainmask);
        u8 c, sts_160 = sts;
 
-       /* mt7915 doesn't support bw160 */
-       if (is_mt7915(&dev->mt76))
-               sts_160 = 0;
+       /* Can do 1/2 of STS in 160Mhz mode for mt7915 */
+       if (is_mt7915(&dev->mt76)) {
+               if (!dev->dbdc_support)
+                       sts_160 /= 2;
+               else
+                       sts_160 = 0;
+       }
 
 #ifdef CONFIG_MAC80211_MESH
        if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -944,10 +959,15 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
        int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
        u16 mcs_map = 0;
        u16 mcs_map_160 = 0;
-       u8 nss_160 = nss;
+       u8 nss_160;
 
-       /* Can't do 160MHz with mt7915 */
-       if (is_mt7915(&dev->mt76))
+       if (!is_mt7915(&dev->mt76))
+               nss_160 = nss;
+       else if (!dev->dbdc_support)
+               /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
+               nss_160 = nss / 2;
+       else
+               /* Can't do 160MHz with mt7915 dbdc */
                nss_160 = 0;
 
        for (i = 0; i < 8; i++) {
index 2d2edddc77bdd146027b8ccaa73d3ff52b9a0b6c..3f88e6a0a510ed14f84a0d7a1e0147509293e262 100644 (file)
@@ -447,8 +447,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
        dev_info(&spi->dev, "selected chip family is %s\n",
                 pdev_data->family->name);
 
-       if (of_find_property(dt_node, "clock-xtal", NULL))
-               pdev_data->ref_clock_xtal = true;
+       pdev_data->ref_clock_xtal = of_property_read_bool(dt_node, "clock-xtal");
 
        /* optional clock frequency params */
        of_property_read_u32(dt_node, "ref-clock-frequency",
index ed9c5e2cf3ad43feaa590820202ec844ee96ed6c..a187f0e0b0f7d1799f3f8d8d6ad36a801b7ca756 100644 (file)
@@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
        print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
                             out->data, out->len, false);
 
+       arg.phy = phy;
        init_completion(&arg.done);
        cntx = phy->out_urb->context;
        phy->out_urb->context = &arg;
index 755460a73c0dce184dbe1dbd42c93993b2b3aecd..d2aa9f766738ecfe898f19733f89cd459b5124aa 100644 (file)
@@ -282,13 +282,15 @@ EXPORT_SYMBOL(ndlc_probe);
 
 void ndlc_remove(struct llt_ndlc *ndlc)
 {
-       st_nci_remove(ndlc->ndev);
-
        /* cancel timers */
        del_timer_sync(&ndlc->t1_timer);
        del_timer_sync(&ndlc->t2_timer);
        ndlc->t2_active = false;
        ndlc->t1_active = false;
+       /* cancel work */
+       cancel_work_sync(&ndlc->sm_work);
+
+       st_nci_remove(ndlc->ndev);
 
        skb_queue_purge(&ndlc->rcv_q);
        skb_queue_purge(&ndlc->send_q);
index c2730b116dc680eac04df43a81c81a3826c5da16..53ef028596c61cec40bbd4c2e2e1f6ac91958586 100644 (file)
@@ -781,16 +781,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                range = page_address(ns->ctrl->discard_page);
        }
 
-       __rq_for_each_bio(bio, req) {
-               u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-               u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
-
-               if (n < segments) {
-                       range[n].cattr = cpu_to_le32(0);
-                       range[n].nlb = cpu_to_le32(nlb);
-                       range[n].slba = cpu_to_le64(slba);
+       if (queue_max_discard_segments(req->q) == 1) {
+               u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
+               u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+
+               range[0].cattr = cpu_to_le32(0);
+               range[0].nlb = cpu_to_le32(nlb);
+               range[0].slba = cpu_to_le64(slba);
+               n = 1;
+       } else {
+               __rq_for_each_bio(bio, req) {
+                       u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+                       u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+                       if (n < segments) {
+                               range[n].cattr = cpu_to_le32(0);
+                               range[n].nlb = cpu_to_le32(nlb);
+                               range[n].slba = cpu_to_le64(slba);
+                       }
+                       n++;
                }
-               n++;
        }
 
        if (WARN_ON_ONCE(n != segments)) {
@@ -3053,7 +3063,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
        else
                ctrl->max_zeroes_sectors = 0;
 
-       if (nvme_ctrl_limited_cns(ctrl))
+       if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+           nvme_ctrl_limited_cns(ctrl))
                return 0;
 
        id = kzalloc(sizeof(*id), GFP_KERNEL);
index 723e7d5b778f2c68aa3eb79decba51c8f2223f90..d24ea2e051564670254c44152de5660b32048a86 100644 (file)
@@ -464,7 +464,8 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
        return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
 }
 
-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
+                                   unsigned issue_flags)
 {
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        struct request *req = pdu->req;
@@ -485,17 +486,18 @@ static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
                blk_rq_unmap_user(req->bio);
        blk_mq_free_request(req);
 
-       io_uring_cmd_done(ioucmd, status, result);
+       io_uring_cmd_done(ioucmd, status, result, issue_flags);
 }
 
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
+                              unsigned issue_flags)
 {
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 
        if (pdu->bio)
                blk_rq_unmap_user(pdu->bio);
 
-       io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+       io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
 }
 
 static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
@@ -517,7 +519,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
         * Otherwise, move the completion to task work.
         */
        if (cookie != NULL && blk_rq_is_poll(req))
-               nvme_uring_task_cb(ioucmd);
+               nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
        else
                io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
 
@@ -539,7 +541,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
         * Otherwise, move the completion to task work.
         */
        if (cookie != NULL && blk_rq_is_poll(req))
-               nvme_uring_task_meta_cb(ioucmd);
+               nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
        else
                io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
 
index fc39d01e7b63be8a4f9e794f915715cbc2c8609b..9171452e2f6d4e2eed95d0c6ef9ec0cbf05bc7ef 100644 (file)
@@ -123,9 +123,8 @@ void nvme_mpath_start_request(struct request *rq)
                return;
 
        nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
-       nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0,
-                                       blk_rq_bytes(rq) >> SECTOR_SHIFT,
-                                       req_op(rq), jiffies);
+       nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
+                                                     jiffies);
 }
 EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
 
@@ -136,7 +135,8 @@ void nvme_mpath_end_request(struct request *rq)
        if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
                return;
        bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
-               nvme_req(rq)->start_time);
+                        blk_rq_bytes(rq) >> SECTOR_SHIFT,
+                        nvme_req(rq)->start_time);
 }
 
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
index 5b95c94ee40f209e8d1cef39ee65da67a0b1736a..b615906263f3742487bfca733c80fe3d6433d6b2 100644 (file)
@@ -3073,6 +3073,7 @@ out_dev_unmap:
        nvme_dev_unmap(dev);
 out_uninit_ctrl:
        nvme_uninit_ctrl(&dev->ctrl);
+       nvme_put_ctrl(&dev->ctrl);
        return result;
 }
 
@@ -3415,6 +3416,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x501E),   /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x1f40, 0x1202),   /* Netac Technologies Co. NV3000 NVMe SSD */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1f40, 0x5236),   /* Netac Technologies Co. NV7000 NVMe SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
@@ -3435,6 +3438,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
index 7723a498952442c14c5346a5e30fd1383aa0940e..42c0598c31f2b7e680e7cb10ca9556d1b47826b1 100644 (file)
@@ -208,6 +208,18 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
        return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 }
 
+static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
+{
+       return req->pdu;
+}
+
+static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
+{
+       /* use the pdu space in the back for the data pdu */
+       return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
+               sizeof(struct nvme_tcp_data_pdu);
+}
+
 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
 {
        if (nvme_is_fabrics(req->req.cmd))
@@ -614,7 +626,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 
 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
 {
-       struct nvme_tcp_data_pdu *data = req->pdu;
+       struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
        struct nvme_tcp_queue *queue = req->queue;
        struct request *rq = blk_mq_rq_from_pdu(req);
        u32 h2cdata_sent = req->pdu_len;
@@ -1038,7 +1050,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 {
        struct nvme_tcp_queue *queue = req->queue;
-       struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+       struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
        bool inline_data = nvme_tcp_has_inline_data(req);
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        int len = sizeof(*pdu) + hdgst - req->offset;
@@ -1077,7 +1089,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 {
        struct nvme_tcp_queue *queue = req->queue;
-       struct nvme_tcp_data_pdu *pdu = req->pdu;
+       struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
        u8 hdgst = nvme_tcp_hdgst_len(queue);
        int len = sizeof(*pdu) - req->offset + hdgst;
        int ret;
@@ -2284,7 +2296,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
 {
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
-       struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+       struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
        u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
        int qid = nvme_tcp_queue_id(req->queue);
 
@@ -2323,7 +2335,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
                        struct request *rq)
 {
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-       struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+       struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
        struct nvme_command *c = &pdu->cmd;
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -2343,7 +2355,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
                struct request *rq)
 {
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
-       struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+       struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
        struct nvme_tcp_queue *queue = req->queue;
        u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
        blk_status_t ret;
@@ -2682,6 +2694,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
 
 static int __init nvme_tcp_init_module(void)
 {
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
+       BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
+
        nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
                        WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!nvme_tcp_wq)
index f66ed13d7c11deacbdfdd982ff8548f913f1b933..3935165048e74199cd4a9706811dd7d63fc39476 100644 (file)
@@ -756,8 +756,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 
 void nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
+       struct nvmet_sq *sq = req->sq;
+
        __nvmet_req_complete(req, status);
-       percpu_ref_put(&req->sq->ref);
+       percpu_ref_put(&sq->ref);
 }
 EXPORT_SYMBOL_GPL(nvmet_req_complete);
 
index 174ef3574e07f5cb7e771d108deeb2e09056f971..22024b830788f86d23a213809222511c7ae6ea84 100644 (file)
@@ -1231,7 +1231,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
                                                  "#nvmem-cell-cells",
                                                  index, &cell_spec);
        if (ret)
-               return ERR_PTR(ret);
+               return ERR_PTR(-ENOENT);
 
        if (cell_spec.args_count > 1)
                return ERR_PTR(-EINVAL);
index 83ae838ceb5f0a3ea93750ea51568eb83bd91764..549c4bd5caecafd925692f308d45fa6c40a3387f 100644 (file)
@@ -76,6 +76,27 @@ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
 }
 EXPORT_SYMBOL_GPL(pci_bus_resource_n);
 
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
+{
+       struct pci_bus_resource *bus_res, *tmp;
+       int i;
+
+       for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+               if (bus->resource[i] == res) {
+                       bus->resource[i] = NULL;
+                       return;
+               }
+       }
+
+       list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
+               if (bus_res->res == res) {
+                       list_del(&bus_res->list);
+                       kfree(bus_res);
+                       return;
+               }
+       }
+}
+
 void pci_bus_remove_resources(struct pci_bus *bus)
 {
        int i;
index 0de7c255254e0ba7f4e33c4da8a7b7573e815bb5..d6de5a29412820d14e3a55dce951b94228bb60de 100644 (file)
@@ -284,7 +284,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
            u_cmd.insize > EC_MAX_MSG_BYTES)
                return -EINVAL;
 
-       s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+       s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
                        GFP_KERNEL);
        if (!s_cmd)
                return -ENOMEM;
index 8e6f8a655079066975acd420d230c7f5ff150511..05f41317846296b067f41d050abc66d26cf09ff0 100644 (file)
@@ -724,6 +724,8 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
 
        for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
                pirq = platform_get_irq(pdev, i);
+               if (pirq < 0)
+                       continue;
                ret = regmap_irq_get_virq(axp20x->regmap_irqc, pirq);
                if (ret < 0)
                        return dev_err_probe(dev, ret, "getting vIRQ %d\n", pirq);
index be34b98484508a980db556af6352aaf12f622864..de67b985f0a913a355ea3c27e3897d7373b78db2 100644 (file)
@@ -1906,6 +1906,7 @@ static void bq24190_remove(struct i2c_client *client)
        struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
        int error;
 
+       cancel_delayed_work_sync(&bdi->input_current_limit_work);
        error = pm_runtime_resume_and_get(bdi->dev);
        if (error < 0)
                dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
index cadb6a0c2cc7e0a5e0e4e91951e0b6c758c27f8e..b6c96376776a9aa36f3d06fa0654f3bd8ddd3e14 100644 (file)
@@ -276,7 +276,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
                port->psy_current_max = 0;
                break;
        default:
-               dev_err(dev, "Port %d: default case!\n", port->port_number);
+               dev_dbg(dev, "Port %d: default case!\n", port->port_number);
                port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
        }
 
index 14da5c595dd9f31a720118112395af87810fe32f..a87aeaea38e1395c142d2bfad50cec37d3bf919c 100644 (file)
@@ -657,6 +657,7 @@ static int da9150_charger_remove(struct platform_device *pdev)
 
        if (!IS_ERR_OR_NULL(charger->usb_phy))
                usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
+       cancel_work_sync(&charger->otg_work);
 
        power_supply_unregister(charger->battery);
        power_supply_unregister(charger->usb);
index 4f9c1c417916550d76706253347acc7e64d18f3d..36f807b5ec4425c30646d82f558b551cda19d9c7 100644 (file)
@@ -785,8 +785,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
                regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
                                 bulk_reg, 4);
                tmp = get_unaligned_be32(bulk_reg);
-               if (tmp < 0)
-                       tmp = 0;
                boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
                                                    charger->res_div) / 1000;
                /*
@@ -825,8 +823,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
        regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
                         bulk_reg, 4);
        tmp = get_unaligned_be32(bulk_reg);
-       if (tmp < 0)
-               tmp = 0;
        boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
        regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
                         bulk_reg, 2);
index 362fa631f39b2654ee051209826fad0ff3648c36..a226dc1b65d715f03addcf638fa52510360b1272 100644 (file)
@@ -1145,10 +1145,12 @@ static int alua_activate(struct scsi_device *sdev,
        rcu_read_unlock();
        mutex_unlock(&h->init_mutex);
 
-       if (alua_rtpg_queue(pg, sdev, qdata, true))
+       if (alua_rtpg_queue(pg, sdev, qdata, true)) {
                fn = NULL;
-       else
+       } else {
+               kfree(qdata);
                err = SCSI_DH_DEV_OFFLINED;
+       }
        kref_put(&pg->kref, release_port_group);
 out:
        if (fn)
index f7f62e56afcae6db20711357ed93242562ddcfed..9b6fbbe15d9226a0f48c842a781e59494540f7d3 100644 (file)
@@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev)
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct device *parent = dev->parent;
 
-       /* In case scsi_remove_host() has not been called. */
-       scsi_proc_hostdir_rm(shost->hostt);
-
        /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
        rcu_barrier();
 
index 40f238fa80cc18341b329a7e5eb1a2060a5e70d8..364fb1b5e45ace6cd4c5e5aa3ddcc706dee47063 100644 (file)
@@ -1393,4 +1393,6 @@ void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
 void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
 void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
 int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+       struct mpi3mr_sas_node *sas_expander);
 #endif /*MPI3MR_H_INCLUDED*/
index 29acf6111db302d3a0e9dba2c48aaa046b8f97ba..a565817aa56d4b68149323a8132997844d081bf4 100644 (file)
@@ -3837,29 +3837,34 @@ retry_init:
 
        mpi3mr_print_ioc_info(mrioc);
 
-       dprint_init(mrioc, "allocating config page buffers\n");
-       mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
-           MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
        if (!mrioc->cfg_page) {
-               retval = -1;
-               goto out_failed_noretry;
+               dprint_init(mrioc, "allocating config page buffers\n");
+               mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+               mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+                   mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
+               if (!mrioc->cfg_page) {
+                       retval = -1;
+                       goto out_failed_noretry;
+               }
        }
 
-       mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
-
-       retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
-       if (retval) {
-               ioc_err(mrioc,
-                   "%s :Failed to allocated reply sense buffers %d\n",
-                   __func__, retval);
-               goto out_failed_noretry;
+       if (!mrioc->init_cmds.reply) {
+               retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+               if (retval) {
+                       ioc_err(mrioc,
+                           "%s :Failed to allocated reply sense buffers %d\n",
+                           __func__, retval);
+                       goto out_failed_noretry;
+               }
        }
 
-       retval = mpi3mr_alloc_chain_bufs(mrioc);
-       if (retval) {
-               ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
-                   retval);
-               goto out_failed_noretry;
+       if (!mrioc->chain_sgl_list) {
+               retval = mpi3mr_alloc_chain_bufs(mrioc);
+               if (retval) {
+                       ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+                           retval);
+                       goto out_failed_noretry;
+               }
        }
 
        retval = mpi3mr_issue_iocinit(mrioc);
@@ -4382,13 +4387,20 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
                    mrioc->admin_req_base, mrioc->admin_req_dma);
                mrioc->admin_req_base = NULL;
        }
-
+       if (mrioc->cfg_page) {
+               dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
+                   mrioc->cfg_page, mrioc->cfg_page_dma);
+               mrioc->cfg_page = NULL;
+       }
        if (mrioc->pel_seqnum_virt) {
                dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
                    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
                mrioc->pel_seqnum_virt = NULL;
        }
 
+       kfree(mrioc->throttle_groups);
+       mrioc->throttle_groups = NULL;
+
        kfree(mrioc->logdata_buf);
        mrioc->logdata_buf = NULL;
 
index a794cc8a1c0b137697624d2e90f3a257e8e7983d..6d55698ea4d16c9ca22c6ca136e18ca7be2977d0 100644 (file)
@@ -5078,6 +5078,8 @@ static void mpi3mr_remove(struct pci_dev *pdev)
        struct workqueue_struct *wq;
        unsigned long flags;
        struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+       struct mpi3mr_hba_port *port, *hba_port_next;
+       struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
 
        if (!shost)
                return;
@@ -5117,6 +5119,28 @@ static void mpi3mr_remove(struct pci_dev *pdev)
        mpi3mr_free_mem(mrioc);
        mpi3mr_cleanup_resources(mrioc);
 
+       spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+       list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+           &mrioc->sas_expander_list, list) {
+               spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+               mpi3mr_expander_node_remove(mrioc, sas_expander);
+               spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+       }
+       list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
+               ioc_info(mrioc,
+                   "removing hba_port entry: %p port: %d from hba_port list\n",
+                   port, port->port_id);
+               list_del(&port->list);
+               kfree(port);
+       }
+       spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+       if (mrioc->sas_hba.num_phys) {
+               kfree(mrioc->sas_hba.phy);
+               mrioc->sas_hba.phy = NULL;
+               mrioc->sas_hba.num_phys = 0;
+       }
+
        spin_lock(&mrioc_list_lock);
        list_del(&mrioc->list);
        spin_unlock(&mrioc_list_lock);
index be25f242fa79495f796bc21180105927440e4011..5748bd9369ff78495085f15595b4c580742cdd67 100644 (file)
@@ -9,9 +9,6 @@
 
 #include "mpi3mr.h"
 
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
-       struct mpi3mr_sas_node *sas_expander);
-
 /**
  * mpi3mr_post_transport_req - Issue transport requests and wait
  * @mrioc: Adapter instance reference
@@ -2164,7 +2161,7 @@ out_fail:
  *
  * Return nothing.
  */
-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_sas_node *sas_expander)
 {
        struct mpi3mr_sas_port *mr_sas_port, *next;
index e5ecd6ada6cdd851e833fce012d1e6f987bc3c3d..e8a4750f6ec473bd6e41385fa2c920179e0c4ba5 100644 (file)
@@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
                goto out_fail;
        }
        port = sas_port_alloc_num(sas_node->parent_dev);
-       if ((sas_port_add(port))) {
+       if (!port || (sas_port_add(port))) {
                ioc_err(ioc, "failure at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                goto out_fail;
@@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
                            mpt3sas_port->remote_identify.sas_address;
        }
 
+       if (!rphy) {
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
+               goto out_delete_port;
+       }
+
        rphy->identify = mpt3sas_port->remote_identify;
 
        if ((sas_rphy_add(rphy))) {
@@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
                        __FILE__, __LINE__, __func__);
                sas_rphy_free(rphy);
                rphy = NULL;
+               goto out_delete_port;
        }
 
        if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
                    rphy_to_expander_device(rphy), hba_port->port_id);
        return mpt3sas_port;
 
- out_fail:
+out_delete_port:
+       sas_port_delete(port);
+
+out_fail:
        list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
            port_siblings)
                list_del(&mpt3sas_phy->port_siblings);
index 030625ebb4e653a0da4df20f62751f4926332354..71feda2cdb63046a713c5018ae64948abed1084f 100644 (file)
@@ -1900,6 +1900,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
        }
 
        req->outstanding_cmds[index] = NULL;
+
+       qla_put_fw_resources(sp->qpair, &sp->iores);
        return sp;
 }
 
@@ -3112,7 +3114,6 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
        }
        bsg_reply->reply_payload_rcv_len = 0;
 
-       qla_put_fw_resources(sp->qpair, &sp->iores);
 done:
        /* Return the vendor specific reply to API */
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
index 80c4ee9df2a4ff86fe6243c9201cb29e2f54899b..bee1b8a820207456ce446ae6222be6fac3131c52 100644 (file)
@@ -1865,6 +1865,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                sp = req->outstanding_cmds[cnt];
                if (sp) {
+                       /*
+                        * perform lockless completion during driver unload
+                        */
+                       if (qla2x00_chip_is_down(vha)) {
+                               req->outstanding_cmds[cnt] = NULL;
+                               spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+                               sp->done(sp, res);
+                               spin_lock_irqsave(qp->qp_lock_ptr, flags);
+                               continue;
+                       }
+
                        switch (sp->cmd_type) {
                        case TYPE_SRB:
                                qla2x00_abort_srb(qp, sp, res, &flags);
index 7d2210a006f0d44b1c71233e0bc8d684ec4f77ae..5cce1ba70fc602697ad02f492db40703710e445c 100644 (file)
@@ -326,6 +326,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
        unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
        int result;
 
+       if (sdev->no_vpd_size)
+               return SCSI_DEFAULT_VPD_LEN;
+
        /*
         * Fetch the VPD page header to find out how big the page
         * is. This is done to prevent problems on legacy devices
index c7080454aea997a66202badf18fbfe383c42f32b..3fcaf10a9dfe76daa651db71c9a2f0f6b1bbd1b0 100644 (file)
@@ -134,7 +134,7 @@ static struct {
        {"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
        {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
        {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
-       {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
+       {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE},
        {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
        {"BELKIN", "USB 2 HS-CF", "1.95",  BLIST_FORCELUN | BLIST_INQUIRY_36},
        {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
@@ -188,6 +188,7 @@ static struct {
        {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
        {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
        {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+       {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
        {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
        {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
        {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
@@ -233,6 +234,7 @@ static struct {
        {"SGI", "RAID5", "*", BLIST_SPARSELUN},
        {"SGI", "TP9100", "*", BLIST_REPORTLUN2},
        {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
        {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
index 4e842d79de317c2e7ed5a54e45f1fd32aa07b908..d217be323cc690f0736df29349d085bd27575170 100644 (file)
@@ -1057,6 +1057,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        else if (*bflags & BLIST_SKIP_VPD_PAGES)
                sdev->skip_vpd_pages = 1;
 
+       if (*bflags & BLIST_NO_VPD_SIZE)
+               sdev->no_vpd_size = 1;
+
        transport_configure_device(&sdev->sdev_gendev);
 
        if (sdev->host->hostt->slave_configure) {
index 23ce2f78c4ed4c5e003e3ffeb022e1a1e20a0f1e..26efe12012a0da443ddd21586c03b56381ed1050 100644 (file)
@@ -191,9 +191,9 @@ static const struct llcc_slice_config sc8280xp_data[] = {
        { LLCC_CVP,      28, 512,  3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
        { LLCC_APTCM,    30, 1024, 3, 1, 0x0,   0x1, 1, 0, 0, 1, 0, 0 },
        { LLCC_WRCACHE,  31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
-       { LLCC_CVPFW,    32, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
-       { LLCC_CPUSS1,   33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
-       { LLCC_CPUHWT,   36, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+       { LLCC_CVPFW,    17, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+       { LLCC_CPUSS1,   3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+       { LLCC_CPUHWT,   5, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
 };
 
 static const struct llcc_slice_config sdm845_data[] =  {
index 2d3ee22b924943c33d41cc6b11f9a79ed0206512..538fa182169a4171752df2f3c41b0b4cf30b1c1d 100644 (file)
@@ -176,7 +176,8 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
        struct reserved_mem *rmem;
        struct qcom_rmtfs_mem *rmtfs_mem;
        u32 client_id;
-       u32 num_vmids, vmid[NUM_MAX_VMIDS];
+       u32 vmid[NUM_MAX_VMIDS];
+       int num_vmids;
        int ret, i;
 
        rmem = of_reserved_mem_lookup(node);
@@ -228,8 +229,11 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
        }
 
        num_vmids = of_property_count_u32_elems(node, "qcom,vmid");
-       if (num_vmids < 0) {
-               dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", ret);
+       if (num_vmids == -EINVAL) {
+               /* qcom,vmid is optional */
+               num_vmids = 0;
+       } else if (num_vmids < 0) {
+               dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids);
                goto remove_cdev;
        } else if (num_vmids > NUM_MAX_VMIDS) {
                dev_warn(&pdev->dev,
index 297dc62bca2986f014c4b4060c95d112a807fe7d..372d64756ed64b10c3dd9096aa20886320837ff9 100644 (file)
@@ -267,35 +267,34 @@ int amdtee_open_session(struct tee_context *ctx,
                goto out;
        }
 
+       /* Open session with loaded TA */
+       handle_open_session(arg, &session_info, param);
+       if (arg->ret != TEEC_SUCCESS) {
+               pr_err("open_session failed %d\n", arg->ret);
+               handle_unload_ta(ta_handle);
+               kref_put(&sess->refcount, destroy_session);
+               goto out;
+       }
+
        /* Find an empty session index for the given TA */
        spin_lock(&sess->lock);
        i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
-       if (i < TEE_NUM_SESSIONS)
+       if (i < TEE_NUM_SESSIONS) {
+               sess->session_info[i] = session_info;
+               set_session_id(ta_handle, i, &arg->session);
                set_bit(i, sess->sess_mask);
+       }
        spin_unlock(&sess->lock);
 
        if (i >= TEE_NUM_SESSIONS) {
                pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+               handle_close_session(ta_handle, session_info);
                handle_unload_ta(ta_handle);
                kref_put(&sess->refcount, destroy_session);
                rc = -ENOMEM;
                goto out;
        }
 
-       /* Open session with loaded TA */
-       handle_open_session(arg, &session_info, param);
-       if (arg->ret != TEEC_SUCCESS) {
-               pr_err("open_session failed %d\n", arg->ret);
-               spin_lock(&sess->lock);
-               clear_bit(i, sess->sess_mask);
-               spin_unlock(&sess->lock);
-               handle_unload_ta(ta_handle);
-               kref_put(&sess->refcount, destroy_session);
-               goto out;
-       }
-
-       sess->session_info[i] = session_info;
-       set_session_id(ta_handle, i, &arg->session);
 out:
        free_pages((u64)ta, get_order(ta_size));
        return rc;
index 55679fd86505d762ea05ec68028e1a500aec0b5d..566df4522b8853fb89d965d994353c6c0d603671 100644 (file)
@@ -613,6 +613,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        struct thermal_instance *pos;
        struct thermal_zone_device *pos1;
        struct thermal_cooling_device *pos2;
+       bool upper_no_limit;
        int result;
 
        if (trip >= tz->num_trips || trip < 0)
@@ -632,7 +633,13 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
 
        /* lower default 0, upper default max_state */
        lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
-       upper = upper == THERMAL_NO_LIMIT ? cdev->max_state : upper;
+
+       if (upper == THERMAL_NO_LIMIT) {
+               upper = cdev->max_state;
+               upper_no_limit = true;
+       } else {
+               upper_no_limit = false;
+       }
 
        if (lower > upper || upper > cdev->max_state)
                return -EINVAL;
@@ -644,6 +651,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        dev->cdev = cdev;
        dev->trip = trip;
        dev->upper = upper;
+       dev->upper_no_limit = upper_no_limit;
        dev->lower = lower;
        dev->target = THERMAL_NO_TARGET;
        dev->weight = weight;
@@ -1045,6 +1053,91 @@ devm_thermal_of_cooling_device_register(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register);
 
+static bool thermal_cooling_device_present(struct thermal_cooling_device *cdev)
+{
+       struct thermal_cooling_device *pos = NULL;
+
+       list_for_each_entry(pos, &thermal_cdev_list, node) {
+               if (pos == cdev)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * thermal_cooling_device_update - Update a cooling device object
+ * @cdev: Target cooling device.
+ *
+ * Update @cdev to reflect a change of the underlying hardware or platform.
+ *
+ * Must be called when the maximum cooling state of @cdev becomes invalid and so
+ * its .get_max_state() callback needs to be run to produce the new maximum
+ * cooling state value.
+ */
+void thermal_cooling_device_update(struct thermal_cooling_device *cdev)
+{
+       struct thermal_instance *ti;
+       unsigned long state;
+
+       if (IS_ERR_OR_NULL(cdev))
+               return;
+
+       /*
+        * Hold thermal_list_lock throughout the update to prevent the device
+        * from going away while being updated.
+        */
+       mutex_lock(&thermal_list_lock);
+
+       if (!thermal_cooling_device_present(cdev))
+               goto unlock_list;
+
+       /*
+        * Update under the cdev lock to prevent the state from being set beyond
+        * the new limit concurrently.
+        */
+       mutex_lock(&cdev->lock);
+
+       if (cdev->ops->get_max_state(cdev, &cdev->max_state))
+               goto unlock;
+
+       thermal_cooling_device_stats_reinit(cdev);
+
+       list_for_each_entry(ti, &cdev->thermal_instances, cdev_node) {
+               if (ti->upper == cdev->max_state)
+                       continue;
+
+               if (ti->upper < cdev->max_state) {
+                       if (ti->upper_no_limit)
+                               ti->upper = cdev->max_state;
+
+                       continue;
+               }
+
+               ti->upper = cdev->max_state;
+               if (ti->lower > ti->upper)
+                       ti->lower = ti->upper;
+
+               if (ti->target == THERMAL_NO_TARGET)
+                       continue;
+
+               if (ti->target > ti->upper)
+                       ti->target = ti->upper;
+       }
+
+       if (cdev->ops->get_cur_state(cdev, &state) || state > cdev->max_state)
+               goto unlock;
+
+       thermal_cooling_device_stats_update(cdev, state);
+
+unlock:
+       mutex_unlock(&cdev->lock);
+
+unlock_list:
+       mutex_unlock(&thermal_list_lock);
+}
+EXPORT_SYMBOL_GPL(thermal_cooling_device_update);
+
 static void __unbind(struct thermal_zone_device *tz, int mask,
                     struct thermal_cooling_device *cdev)
 {
@@ -1067,20 +1160,17 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
        int i;
        const struct thermal_zone_params *tzp;
        struct thermal_zone_device *tz;
-       struct thermal_cooling_device *pos = NULL;
 
        if (!cdev)
                return;
 
        mutex_lock(&thermal_list_lock);
-       list_for_each_entry(pos, &thermal_cdev_list, node)
-               if (pos == cdev)
-                       break;
-       if (pos != cdev) {
-               /* thermal cooling device not found */
+
+       if (!thermal_cooling_device_present(cdev)) {
                mutex_unlock(&thermal_list_lock);
                return;
        }
+
        list_del(&cdev->node);
 
        /* Unbind all thermal zones associated with 'this' cdev */
@@ -1309,7 +1399,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
                struct thermal_trip trip;
 
                result = thermal_zone_get_trip(tz, count, &trip);
-               if (result)
+               if (result || !trip.temperature)
                        set_bit(count, &tz->trips_disabled);
        }
 
index 7af54382e915172975bd63079f3a5cf23de8d1fc..3d4a787c6b28a0baf7197726058708a5a7b830af 100644 (file)
@@ -101,6 +101,7 @@ struct thermal_instance {
        struct list_head tz_node; /* node in tz->thermal_instances */
        struct list_head cdev_node; /* node in cdev->thermal_instances */
        unsigned int weight; /* The weight of the cooling device */
+       bool upper_no_limit;
 };
 
 #define to_thermal_zone(_dev) \
@@ -127,6 +128,7 @@ int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
 void thermal_zone_destroy_device_groups(struct thermal_zone_device *);
 void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *);
 void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev);
+void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev);
 /* used only at binding time */
 ssize_t trip_point_show(struct device *, struct device_attribute *, char *);
 ssize_t weight_show(struct device *, struct device_attribute *, char *);
index cef860deaf912db0b264a14e71ffff0bff3eee52..a4aba7b8bb8b96758115f2de301223769a98eafd 100644 (file)
@@ -685,6 +685,8 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
 {
        struct cooling_dev_stats *stats = cdev->stats;
 
+       lockdep_assert_held(&cdev->lock);
+
        if (!stats)
                return;
 
@@ -706,13 +708,22 @@ static ssize_t total_trans_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct thermal_cooling_device *cdev = to_cooling_device(dev);
-       struct cooling_dev_stats *stats = cdev->stats;
-       int ret;
+       struct cooling_dev_stats *stats;
+       int ret = 0;
+
+       mutex_lock(&cdev->lock);
+
+       stats = cdev->stats;
+       if (!stats)
+               goto unlock;
 
        spin_lock(&stats->lock);
        ret = sprintf(buf, "%u\n", stats->total_trans);
        spin_unlock(&stats->lock);
 
+unlock:
+       mutex_unlock(&cdev->lock);
+
        return ret;
 }
 
@@ -721,11 +732,18 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
                      char *buf)
 {
        struct thermal_cooling_device *cdev = to_cooling_device(dev);
-       struct cooling_dev_stats *stats = cdev->stats;
+       struct cooling_dev_stats *stats;
        ssize_t len = 0;
        int i;
 
+       mutex_lock(&cdev->lock);
+
+       stats = cdev->stats;
+       if (!stats)
+               goto unlock;
+
        spin_lock(&stats->lock);
+
        update_time_in_state(stats);
 
        for (i = 0; i <= cdev->max_state; i++) {
@@ -734,6 +752,9 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
        }
        spin_unlock(&stats->lock);
 
+unlock:
+       mutex_unlock(&cdev->lock);
+
        return len;
 }
 
@@ -742,8 +763,16 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
            size_t count)
 {
        struct thermal_cooling_device *cdev = to_cooling_device(dev);
-       struct cooling_dev_stats *stats = cdev->stats;
-       int i, states = cdev->max_state + 1;
+       struct cooling_dev_stats *stats;
+       int i, states;
+
+       mutex_lock(&cdev->lock);
+
+       stats = cdev->stats;
+       if (!stats)
+               goto unlock;
+
+       states = cdev->max_state + 1;
 
        spin_lock(&stats->lock);
 
@@ -757,6 +786,9 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
 
        spin_unlock(&stats->lock);
 
+unlock:
+       mutex_unlock(&cdev->lock);
+
        return count;
 }
 
@@ -764,10 +796,18 @@ static ssize_t trans_table_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        struct thermal_cooling_device *cdev = to_cooling_device(dev);
-       struct cooling_dev_stats *stats = cdev->stats;
+       struct cooling_dev_stats *stats;
        ssize_t len = 0;
        int i, j;
 
+       mutex_lock(&cdev->lock);
+
+       stats = cdev->stats;
+       if (!stats) {
+               len = -ENODATA;
+               goto unlock;
+       }
+
        len += snprintf(buf + len, PAGE_SIZE - len, " From  :    To\n");
        len += snprintf(buf + len, PAGE_SIZE - len, "       : ");
        for (i = 0; i <= cdev->max_state; i++) {
@@ -775,8 +815,10 @@ static ssize_t trans_table_show(struct device *dev,
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "state%2u  ", i);
        }
-       if (len >= PAGE_SIZE)
-               return PAGE_SIZE;
+       if (len >= PAGE_SIZE) {
+               len = PAGE_SIZE;
+               goto unlock;
+       }
 
        len += snprintf(buf + len, PAGE_SIZE - len, "\n");
 
@@ -799,8 +841,12 @@ static ssize_t trans_table_show(struct device *dev,
 
        if (len >= PAGE_SIZE) {
                pr_warn_once("Thermal transition table exceeds PAGE_SIZE. Disabling\n");
-               return -EFBIG;
+               len = -EFBIG;
        }
+
+unlock:
+       mutex_unlock(&cdev->lock);
+
        return len;
 }
 
@@ -830,6 +876,8 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
        unsigned long states = cdev->max_state + 1;
        int var;
 
+       lockdep_assert_held(&cdev->lock);
+
        var = sizeof(*stats);
        var += sizeof(*stats->time_in_state) * states;
        var += sizeof(*stats->trans_table) * states * states;
@@ -855,6 +903,8 @@ out:
 
 static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
 {
+       lockdep_assert_held(&cdev->lock);
+
        kfree(cdev->stats);
        cdev->stats = NULL;
 }
@@ -879,6 +929,12 @@ void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev)
        cooling_device_stats_destroy(cdev);
 }
 
+void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev)
+{
+       cooling_device_stats_destroy(cdev);
+       cooling_device_stats_setup(cdev);
+}
+
 /* these helper will be used only at the time of bindig */
 ssize_t
 trip_point_show(struct device *dev, struct device_attribute *attr, char *buf)
index 4339e706cc3a12ad455b01ab4a0e99898c06c9aa..f92ad71ef9831fa3611134c764af3af68b1c4582 100644 (file)
@@ -942,7 +942,8 @@ static void margining_port_remove(struct tb_port *port)
 
        snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
        parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
-       debugfs_remove_recursive(debugfs_lookup("margining", parent));
+       if (parent)
+               debugfs_remove_recursive(debugfs_lookup("margining", parent));
 
        kfree(port->usb4->margining);
        port->usb4->margining = NULL;
@@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw)
 
 static void margining_switch_remove(struct tb_switch *sw)
 {
+       struct tb_port *upstream, *downstream;
        struct tb_switch *parent_sw;
-       struct tb_port *downstream;
        u64 route = tb_route(sw);
 
        if (!route)
                return;
 
-       /*
-        * Upstream is removed with the router itself but we need to
-        * remove the downstream port margining directory.
-        */
+       upstream = tb_upstream_port(sw);
        parent_sw = tb_switch_parent(sw);
        downstream = tb_port_at(route, parent_sw);
+
+       margining_port_remove(upstream);
        margining_port_remove(downstream);
 }
 
index 4dce2edd86ea0f9a4d223f2f6047e8a7db03d42e..cfebec107f3fc8a437718c376045cd4d8d734171 100644 (file)
@@ -46,7 +46,7 @@
 #define QUIRK_AUTO_CLEAR_INT   BIT(0)
 #define QUIRK_E2E              BIT(1)
 
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
 {
        int bit = ring->hop;
        if (!ring->is_tx)
@@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
        int reg = REG_RING_INTERRUPT_BASE +
                  ring_interrupt_index(ring) / 32 * 4;
-       int bit = ring_interrupt_index(ring) & 31;
-       int mask = 1 << bit;
+       int interrupt_bit = ring_interrupt_index(ring) & 31;
+       int mask = 1 << interrupt_bit;
        u32 old, new;
 
        if (ring->irq > 0) {
                u32 step, shift, ivr, misc;
                void __iomem *ivr_base;
+               int auto_clear_bit;
                int index;
 
                if (ring->is_tx)
@@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
                else
                        index = ring->hop + ring->nhi->hop_count;
 
-               if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
-                       /*
-                        * Ask the hardware to clear interrupt status
-                        * bits automatically since we already know
-                        * which interrupt was triggered.
-                        */
-                       misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
-                       if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
-                               misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
-                               iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
-                       }
-               }
+               /*
+                * Intel routers support a bit that isn't part of
+                * the USB4 spec to ask the hardware to clear
+                * interrupt status bits automatically since
+                * we already know which interrupt was triggered.
+                *
+                * Other routers explicitly disable auto-clear
+                * to prevent conditions that may occur where two
+                * MSIX interrupts are simultaneously active and
+                * reading the register clears both of them.
+                */
+               misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+               if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+                       auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+               else
+                       auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+               if (!(misc & auto_clear_bit))
+                       iowrite32(misc | auto_clear_bit,
+                                 ring->nhi->iobase + REG_DMA_MISC);
 
                ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
                step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 
        dev_dbg(&ring->nhi->pdev->dev,
                "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
-               active ? "enabling" : "disabling", reg, bit, old, new);
+               active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
 
        if (new == old)
                dev_WARN(&ring->nhi->pdev->dev,
@@ -393,14 +401,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
 
 static void ring_clear_msix(const struct tb_ring *ring)
 {
+       int bit;
+
        if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
                return;
 
+       bit = ring_interrupt_index(ring) & 31;
        if (ring->is_tx)
-               ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
+               iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
        else
-               ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
-                        4 * (ring->nhi->hop_count / 32));
+               iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+                         4 * (ring->nhi->hop_count / 32));
 }
 
 static irqreturn_t ring_msix(int irq, void *data)
index 0d4970dcef842f7c8841ad933ef36940c8d5ed24..faef165a919ccda93a00fea018151210f68aec1b 100644 (file)
@@ -77,12 +77,13 @@ struct ring_desc {
 
 /*
  * three bitfields: tx, rx, rx overflow
- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
- * cleared on read. New interrupts are fired only after ALL registers have been
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
+ * New interrupts are fired only after ALL registers have been
  * read (even those containing only disabled rings).
  */
 #define REG_RING_NOTIFY_BASE   0x37800
 #define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+#define REG_RING_INT_CLEAR     0x37808
 
 /*
  * two bitfields: rx, tx
@@ -105,6 +106,7 @@ struct ring_desc {
 
 #define REG_DMA_MISC                   0x39864
 #define REG_DMA_MISC_INT_AUTO_CLEAR     BIT(2)
+#define REG_DMA_MISC_DISABLE_AUTO_CLEAR        BIT(17)
 
 #define REG_INMAIL_DATA                        0x39900
 
index b5f2ec79c4d6e53e20266aff1ee7c57d9008669f..1157b8869bcca1da217285202b74b53bea2bf703 100644 (file)
@@ -20,6 +20,25 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw)
        }
 }
 
+static void quirk_clx_disable(struct tb_switch *sw)
+{
+       sw->quirks |= QUIRK_NO_CLX;
+       tb_sw_dbg(sw, "disabling CL states\n");
+}
+
+static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+{
+       struct tb_port *port;
+
+       tb_switch_for_each_port(sw, port) {
+               if (!tb_port_is_usb3_down(port))
+                       continue;
+               port->max_bw = 16376;
+               tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
+                           port->max_bw);
+       }
+}
+
 struct tb_quirk {
        u16 hw_vendor_id;
        u16 hw_device_id;
@@ -37,6 +56,31 @@ static const struct tb_quirk tb_quirks[] = {
         * DP buffers.
         */
        { 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
+       /*
+        * Limit the maximum USB3 bandwidth for the following Intel USB4
+        * host routers due to a hardware issue.
+        */
+       { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
+                 quirk_usb3_maximum_bandwidth },
+       { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
+                 quirk_usb3_maximum_bandwidth },
+       { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
+                 quirk_usb3_maximum_bandwidth },
+       { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
+                 quirk_usb3_maximum_bandwidth },
+       { 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
+                 quirk_usb3_maximum_bandwidth },
+       { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
+                 quirk_usb3_maximum_bandwidth },
+       { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+                 quirk_usb3_maximum_bandwidth },
+       /*
+        * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+        */
+       { 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
+       { 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
+       { 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
+       { 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
 };
 
 /**
index 56008eb91e2e448b9d584c318d6bc55c9af00a5b..9cc28197dbc45f7e6912ac43fc01d40fe5a80027 100644 (file)
@@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
        return ret;
 }
 
+static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+{
+       int i;
+
+       for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+               usb4_port_retimer_set_inbound_sbtx(port, i);
+}
+
+static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
+{
+       int i;
+
+       for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
+               usb4_port_retimer_unset_inbound_sbtx(port, i);
+}
+
 static ssize_t nvm_authenticate_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
        rt->auth_status = 0;
 
        if (val) {
+               tb_retimer_set_inbound_sbtx(rt->port);
                if (val == AUTHENTICATE_ONLY) {
                        ret = tb_retimer_nvm_authenticate(rt, true);
                } else {
@@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
        }
 
 exit_unlock:
+       tb_retimer_unset_inbound_sbtx(rt->port);
        mutex_unlock(&rt->tb->lock);
 exit_rpm:
        pm_runtime_mark_last_busy(&rt->dev);
@@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
         * Enable sideband channel for each retimer. We can do this
         * regardless whether there is device connected or not.
         */
-       for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
-               usb4_port_retimer_set_inbound_sbtx(port, i);
+       tb_retimer_set_inbound_sbtx(port);
 
        /*
         * Before doing anything else, read the authentication status.
@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
                        break;
        }
 
+       tb_retimer_unset_inbound_sbtx(port);
+
        if (!last_idx)
                return 0;
 
index 5185cf3e4d978f0216ea8f8cd3327c3fc0be32ef..f37a4320f10a528393b8a10f320103b8565b124d 100644 (file)
@@ -20,6 +20,7 @@ enum usb4_sb_opcode {
        USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c,             /* "LSEN" */
        USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45,         /* "ENUM" */
        USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c,           /* "LSUP" */
+       USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355,         /* "USUP" */
        USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c,         /* "LAST" */
        USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47,        /* "GNSS" */
        USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42,             /* "BOPS" */
index 3370e18ba05f9348c37ec526bbd7641f2673225b..da373ac38285c6a1b35c8ed035689306fc9c045c 100644 (file)
@@ -2968,8 +2968,6 @@ int tb_switch_add(struct tb_switch *sw)
                        dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
                tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
 
-               tb_check_quirks(sw);
-
                ret = tb_switch_set_uuid(sw);
                if (ret) {
                        dev_err(&sw->dev, "failed to set UUID\n");
@@ -2988,6 +2986,8 @@ int tb_switch_add(struct tb_switch *sw)
                        }
                }
 
+               tb_check_quirks(sw);
+
                tb_switch_default_link_ports(sw);
 
                ret = tb_switch_update_link_attributes(sw);
index cbb20a2773462ab892c2e4e242becbe3085dc0bf..275ff5219a3a3ae79a69b0548aab891bc1824aa1 100644 (file)
 #define NVM_MAX_SIZE           SZ_512K
 #define NVM_DATA_DWORDS                16
 
+/* Keep link controller awake during update */
+#define QUIRK_FORCE_POWER_LINK_CONTROLLER              BIT(0)
+/* Disable CLx if not supported */
+#define QUIRK_NO_CLX                                   BIT(1)
+
 /**
  * struct tb_nvm - Structure holding NVM information
  * @dev: Owner of the NVM
@@ -267,6 +272,8 @@ struct tb_bandwidth_group {
  * @group: Bandwidth allocation group the adapter is assigned to. Only
  *        used for DP IN adapters for now.
  * @group_list: The adapter is linked to the group's list of ports through this
+ * @max_bw: Maximum possible bandwidth through this adapter if set to
+ *         non-zero.
  *
  * In USB4 terminology this structure represents an adapter (protocol or
  * lane adapter).
@@ -294,6 +301,7 @@ struct tb_port {
        unsigned int dma_credits;
        struct tb_bandwidth_group *group;
        struct list_head group_list;
+       unsigned int max_bw;
 };
 
 /**
@@ -1019,6 +1027,9 @@ static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
  */
 static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
 {
+       if (sw->quirks & QUIRK_NO_CLX)
+               return false;
+
        return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
 }
 
@@ -1234,6 +1245,7 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
 int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
 
 int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
                           u8 size);
 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
@@ -1291,9 +1303,6 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
 void usb4_port_device_remove(struct usb4_port *usb4);
 int usb4_port_device_resume(struct usb4_port *usb4);
 
-/* Keep link controller awake during update */
-#define QUIRK_FORCE_POWER_LINK_CONTROLLER              BIT(0)
-
 void tb_check_quirks(struct tb_switch *sw);
 
 #ifdef CONFIG_ACPI
index 1e5e9c147a310da3627f38f11296d0345e7f529c..a0996cb2893c8693cfc08fd2e0bf71966b04a34a 100644 (file)
@@ -1578,6 +1578,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
                                    500);
 }
 
+/**
+ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Disables sideband channel transactions on SBTX. The reverse of
+ * usb4_port_retimer_set_inbound_sbtx().
+ */
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
+{
+       return usb4_port_retimer_op(port, index,
+                                   USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
+}
+
 /**
  * usb4_port_retimer_read() - Read from retimer sideband registers
  * @port: USB4 port
@@ -1868,6 +1882,15 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
                                usb4_port_retimer_nvm_read_block, &info);
 }
 
+static inline unsigned int
+usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
+{
+       /* Take the possible bandwidth limitation into account */
+       if (port->max_bw)
+               return min(bw, port->max_bw);
+       return bw;
+}
+
 /**
  * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
  * @port: USB3 adapter port
@@ -1889,7 +1912,9 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
                return ret;
 
        lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
-       return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+       ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+
+       return usb4_usb3_port_max_bandwidth(port, ret);
 }
 
 /**
@@ -1916,7 +1941,9 @@ int usb4_usb3_port_actual_link_rate(struct tb_port *port)
                return 0;
 
        lr = val & ADP_USB3_CS_4_ALR_MASK;
-       return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+       ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+
+       return usb4_usb3_port_max_bandwidth(port, ret);
 }
 
 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
@@ -2067,18 +2094,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
                                                    int downstream_bw)
 {
        u32 val, ubw, dbw, scale;
-       int ret;
+       int ret, max_bw;
 
-       /* Read the used scale, hardware default is 0 */
-       ret = tb_port_read(port, &scale, TB_CFG_PORT,
-                          port->cap_adap + ADP_USB3_CS_3, 1);
+       /* Figure out suitable scale */
+       scale = 0;
+       max_bw = max(upstream_bw, downstream_bw);
+       while (scale < 64) {
+               if (mbps_to_usb3_bw(max_bw, scale) < 4096)
+                       break;
+               scale++;
+       }
+
+       if (WARN_ON(scale >= 64))
+               return -EINVAL;
+
+       ret = tb_port_write(port, &scale, TB_CFG_PORT,
+                           port->cap_adap + ADP_USB3_CS_3, 1);
        if (ret)
                return ret;
 
-       scale &= ADP_USB3_CS_3_SCALE_MASK;
        ubw = mbps_to_usb3_bw(upstream_bw, scale);
        dbw = mbps_to_usb3_bw(downstream_bw, scale);
 
+       tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
+
        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
index 5bddb2f5e93185eca993595dd0c44f7cc7d1ec46..98764e740c07830495dc5d3532e08d3b40a9ee7b 100644 (file)
@@ -43,6 +43,7 @@ struct xencons_info {
        int irq;
        int vtermno;
        grant_ref_t gntref;
+       spinlock_t ring_lock;
 };
 
 static LIST_HEAD(xenconsoles);
@@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
        XENCONS_RING_IDX cons, prod;
        struct xencons_interface *intf = xencons->intf;
        int sent = 0;
+       unsigned long flags;
 
+       spin_lock_irqsave(&xencons->ring_lock, flags);
        cons = intf->out_cons;
        prod = intf->out_prod;
        mb();                   /* update queue values before going on */
 
        if ((prod - cons) > sizeof(intf->out)) {
+               spin_unlock_irqrestore(&xencons->ring_lock, flags);
                pr_err_once("xencons: Illegal ring page indices");
                return -EINVAL;
        }
@@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,
 
        wmb();                  /* write ring before updating pointer */
        intf->out_prod = prod;
+       spin_unlock_irqrestore(&xencons->ring_lock, flags);
 
        if (sent)
                notify_daemon(xencons);
@@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
        int recv = 0;
        struct xencons_info *xencons = vtermno_to_xencons(vtermno);
        unsigned int eoiflag = 0;
+       unsigned long flags;
 
        if (xencons == NULL)
                return -EINVAL;
        intf = xencons->intf;
 
+       spin_lock_irqsave(&xencons->ring_lock, flags);
        cons = intf->in_cons;
        prod = intf->in_prod;
        mb();                   /* get pointers before reading ring */
 
        if ((prod - cons) > sizeof(intf->in)) {
+               spin_unlock_irqrestore(&xencons->ring_lock, flags);
                pr_err_once("xencons: Illegal ring page indices");
                return -EINVAL;
        }
@@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
                xencons->out_cons = intf->out_cons;
                xencons->out_cons_same = 0;
        }
+       if (!recv && xencons->out_cons_same++ > 1) {
+               eoiflag = XEN_EOI_FLAG_SPURIOUS;
+       }
+       spin_unlock_irqrestore(&xencons->ring_lock, flags);
+
        if (recv) {
                notify_daemon(xencons);
-       } else if (xencons->out_cons_same++ > 1) {
-               eoiflag = XEN_EOI_FLAG_SPURIOUS;
        }
 
        xen_irq_lateeoi(xencons->irq, eoiflag);
@@ -239,6 +250,7 @@ static int xen_hvm_console_init(void)
                info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
+               spin_lock_init(&info->ring_lock);
        } else if (info->intf != NULL) {
                /* already configured */
                return 0;
@@ -275,6 +287,7 @@ err:
 
 static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
 {
+       spin_lock_init(&info->ring_lock);
        info->evtchn = xen_start_info->console.domU.evtchn;
        /* GFN == MFN for PV guest */
        info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
@@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void)
                info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
+               spin_lock_init(&info->ring_lock);
        }
 
        info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
@@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
        info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
+       spin_lock_init(&info->ring_lock);
        dev_set_drvdata(&dev->dev, info);
        info->xbdev = dev;
        info->vtermno = xenbus_devid_to_vtermno(devid);
index aa80de3a819478e83b20839bce704c615afcf9b4..678014253b7b2642147d5ffe5b6367d0727f1a59 100644 (file)
@@ -534,7 +534,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
                if (!serdev)
                        continue;
 
-               serdev->dev.of_node = node;
+               device_set_node(&serdev->dev, of_fwnode_handle(node));
 
                err = serdev_device_add(serdev);
                if (err) {
index f8e99995eee911de69ceb5eb8d95dec876cb1acd..d94c3811a8f7af310528a36df2dd99fd28256c69 100644 (file)
@@ -106,8 +106,8 @@ static int serial8250_em_probe(struct platform_device *pdev)
        memset(&up, 0, sizeof(up));
        up.port.mapbase = regs->start;
        up.port.irq = irq;
-       up.port.type = PORT_UNKNOWN;
-       up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
+       up.port.type = PORT_16750;
+       up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE;
        up.port.dev = &pdev->dev;
        up.port.private_data = priv;
 
index 8aad15622a2e5c869b94dfec762f98185e1d785e..8adfaa183f778d4a6f8cf21098025b2dac886691 100644 (file)
@@ -34,7 +34,7 @@ int fsl8250_handle_irq(struct uart_port *port)
 
        iir = port->serial_in(port, UART_IIR);
        if (iir & UART_IIR_NO_INT) {
-               spin_unlock(&up->port.lock);
+               spin_unlock_irqrestore(&up->port.lock, flags);
                return 0;
        }
 
@@ -42,7 +42,7 @@ int fsl8250_handle_irq(struct uart_port *port)
        if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
                up->lsr_saved_flags &= ~UART_LSR_BI;
                port->serial_in(port, UART_RX);
-               spin_unlock(&up->port.lock);
+               spin_unlock_irqrestore(&up->port.lock, flags);
                return 1;
        }
 
index 978dc196c29bea32ca85477337667f2449ec4fde..5313aa31930f4ec4ea8f798a03f8c08066b86b36 100644 (file)
@@ -257,8 +257,9 @@ config SERIAL_8250_ASPEED_VUART
        tristate "Aspeed Virtual UART"
        depends on SERIAL_8250
        depends on OF
-       depends on REGMAP && MFD_SYSCON
+       depends on MFD_SYSCON
        depends on ARCH_ASPEED || COMPILE_TEST
+       select REGMAP
        help
          If you want to use the virtual UART (VUART) device on Aspeed
          BMC platforms, enable this option. This enables the 16550A-
@@ -299,7 +300,6 @@ config SERIAL_8250_PCI1XXXX
        tristate "Microchip 8250 based serial port"
        depends on SERIAL_8250 && PCI
        select SERIAL_8250_PCILIB
-       default SERIAL_8250
        help
         Select this option if you have a setup with Microchip PCIe
         Switch with serial port enabled and wish to enable 8250
index 625358f44419743ffbc3d1663d1aae455df119b6..0072892ca7fc9df89a104270d1e35924cbe00746 100644 (file)
@@ -1313,7 +1313,7 @@ config SERIAL_FSL_LPUART
 
 config SERIAL_FSL_LPUART_CONSOLE
        bool "Console on Freescale lpuart serial port"
-       depends on SERIAL_FSL_LPUART
+       depends on SERIAL_FSL_LPUART=y
        select SERIAL_CORE_CONSOLE
        select SERIAL_EARLYCON
        help
index e945f41b93d4384b573c0ffe279da5c50c187ba1..56e6ba3250cd111f114212afa8224524c76e68e9 100644 (file)
@@ -1354,6 +1354,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
        struct dma_chan *chan = sport->dma_rx_chan;
 
        dmaengine_terminate_sync(chan);
+       del_timer_sync(&sport->lpuart_timer);
        dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
        kfree(sport->rx_ring.buf);
        sport->rx_ring.tail = 0;
@@ -1813,7 +1814,6 @@ static int lpuart32_startup(struct uart_port *port)
 static void lpuart_dma_shutdown(struct lpuart_port *sport)
 {
        if (sport->lpuart_dma_rx_use) {
-               del_timer_sync(&sport->lpuart_timer);
                lpuart_dma_rx_free(&sport->port);
                sport->lpuart_dma_rx_use = false;
        }
@@ -1973,10 +1973,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
         * Since timer function acqures sport->port.lock, need to stop before
         * acquring same lock because otherwise del_timer_sync() can deadlock.
         */
-       if (old && sport->lpuart_dma_rx_use) {
-               del_timer_sync(&sport->lpuart_timer);
+       if (old && sport->lpuart_dma_rx_use)
                lpuart_dma_rx_free(&sport->port);
-       }
 
        spin_lock_irqsave(&sport->port.lock, flags);
 
@@ -2210,10 +2208,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
         * Since timer function acqures sport->port.lock, need to stop before
         * acquring same lock because otherwise del_timer_sync() can deadlock.
         */
-       if (old && sport->lpuart_dma_rx_use) {
-               del_timer_sync(&sport->lpuart_timer);
+       if (old && sport->lpuart_dma_rx_use)
                lpuart_dma_rx_free(&sport->port);
-       }
 
        spin_lock_irqsave(&sport->port.lock, flags);
 
@@ -2240,9 +2236,15 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
        /* update the per-port timeout */
        uart_update_timeout(port, termios->c_cflag, baud);
 
-       /* wait transmit engin complete */
-       lpuart32_write(&sport->port, 0, UARTMODIR);
-       lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+       /*
+        * LPUART Transmission Complete Flag may never be set while queuing a break
+        * character, so skip waiting for transmission complete when UARTCTRL_SBK is
+        * asserted.
+        */
+       if (!(old_ctrl & UARTCTRL_SBK)) {
+               lpuart32_write(&sport->port, 0, UARTMODIR);
+               lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
+       }
 
        /* disable transmit and receive */
        lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
@@ -3014,7 +3016,6 @@ static int lpuart_suspend(struct device *dev)
                         * cannot resume as expected, hence gracefully release the
                         * Rx DMA path before suspend and start Rx DMA path on resume.
                         */
-                       del_timer_sync(&sport->lpuart_timer);
                        lpuart_dma_rx_free(&sport->port);
 
                        /* Disable Rx DMA to use UART port as wakeup source */
index d69592e5e2ec581743d1c155c8ad469453a701d4..28fbc927a546574e6d4559063034f629caffd9ed 100644 (file)
@@ -596,7 +596,7 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
        if (!qcom_geni_serial_main_active(uport))
                return;
 
-       if (port->rx_dma_addr) {
+       if (port->tx_dma_addr) {
                geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr,
                                      port->tx_remaining);
                port->tx_dma_addr = 0;
@@ -631,9 +631,8 @@ static void qcom_geni_serial_start_tx_dma(struct uart_port *uport)
        if (port->tx_dma_addr)
                return;
 
-       xmit_size = uart_circ_chars_pending(xmit);
-       if (xmit_size < WAKEUP_CHARS)
-               uart_write_wakeup(uport);
+       if (uart_circ_empty(xmit))
+               return;
 
        xmit_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
 
@@ -1070,6 +1069,10 @@ static int setup_fifos(struct qcom_geni_serial_port *port)
 static void qcom_geni_serial_shutdown(struct uart_port *uport)
 {
        disable_irq(uport->irq);
+
+       if (uart_console(uport))
+               return;
+
        qcom_geni_serial_stop_tx(uport);
        qcom_geni_serial_stop_rx(uport);
 }
index 57a5c23b51d47429c4c1d2d5a7a80d16beb2b75b..3c2ea9c098f7c4b36f1687f1b4834fe7de0a3e28 100644 (file)
@@ -4545,6 +4545,9 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
        int c;
        unsigned int vpitch = op->op == KD_FONT_OP_GET_TALL ? op->height : 32;
 
+       if (vpitch > max_font_height)
+               return -EINVAL;
+
        if (op->data) {
                font.data = kvmalloc(max_font_size, GFP_KERNEL);
                if (!font.data)
index 05eac965ee2759ccef20c3120727a5acb3fac94e..37e178a9ac4757bb4cd1e796b612726215bf63aa 100644 (file)
@@ -1500,7 +1500,7 @@ start_window:
        scaling->window_start_t = curr_t;
        scaling->tot_busy_t = 0;
 
-       if (hba->outstanding_reqs) {
+       if (scaling->active_reqs) {
                scaling->busy_start_t = curr_t;
                scaling->is_busy_started = true;
        } else {
@@ -2118,7 +2118,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->clk_scaling.active_reqs--;
-       if (!hba->outstanding_reqs && scaling->is_busy_started) {
+       if (!scaling->active_reqs && scaling->is_busy_started) {
                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
                                        scaling->busy_start_t));
                scaling->busy_start_t = 0;
index deeea618ba33beccc840169a2ae85d1f6a8445d8..1f6320d98a76ba37b03e928c9ae965c9d5a01272 100644 (file)
@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
                        return NULL;
        }
 
+       if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+           func->devfn != PCI_DEV_FN_OTG) {
+               return NULL;
+       }
+
        return func;
 }
 
index 9b8325f824992ad7829a12bb880304dcffe0e024..d63d5d92f2554200b8ad138bf69ea6aacf99b6bd 100644 (file)
@@ -403,20 +403,6 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
        case USB_REQ_SET_ISOCH_DELAY:
                ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
                break;
-       case USB_REQ_SET_INTERFACE:
-               /*
-                * Add request into pending list to block sending status stage
-                * by libcomposite.
-                */
-               list_add_tail(&pdev->ep0_preq.list,
-                             &pdev->ep0_preq.pep->pending_list);
-
-               ret = cdnsp_ep0_delegate_req(pdev, ctrl);
-               if (ret == -EBUSY)
-                       ret = 0;
-
-               list_del(&pdev->ep0_preq.list);
-               break;
        default:
                ret = cdnsp_ep0_delegate_req(pdev, ctrl);
                break;
@@ -474,9 +460,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
        else
                ret = cdnsp_ep0_delegate_req(pdev, ctrl);
 
-       if (!len)
-               pdev->ep0_stage = CDNSP_STATUS_STAGE;
-
        if (ret == USB_GADGET_DELAYED_STATUS) {
                trace_cdnsp_ep0_status_stage("delayed");
                return;
@@ -484,6 +467,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
 out:
        if (ret < 0)
                cdnsp_ep0_stall(pdev);
-       else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+       else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
                cdnsp_status_stage(pdev);
 }
index efd54ed918b9701e6f765408f298391219ffca3e..7b151f5af3ccb82ceb37c0a71a7c63da677d3d7b 100644 (file)
 #define PLAT_DRIVER_NAME       "cdns-usbssp"
 
 #define CDNS_VENDOR_ID         0x17cd
-#define CDNS_DEVICE_ID         0x0100
+#define CDNS_DEVICE_ID         0x0200
+#define CDNS_DRD_ID            0x0100
 #define CDNS_DRD_IF            (PCI_CLASS_SERIAL_USB << 8 | 0x80)
 
 static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
 {
-       struct pci_dev *func;
-
        /*
         * Gets the second function.
-        * It's little tricky, but this platform has two function.
-        * The fist keeps resources for Host/Device while the second
-        * keeps resources for DRD/OTG.
+        * Platform has two functions. The first keeps resources for
+        * Host/Device while the second keeps resources for DRD/OTG.
         */
-       func = pci_get_device(pdev->vendor, pdev->device, NULL);
-       if (!func)
-               return NULL;
+       if (pdev->device == CDNS_DEVICE_ID)
+               return  pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
+       else if (pdev->device == CDNS_DRD_ID)
+               return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
 
-       if (func->devfn == pdev->devfn) {
-               func = pci_get_device(pdev->vendor, pdev->device, func);
-               if (!func)
-                       return NULL;
-       }
-
-       return func;
+       return NULL;
 }
 
 static int cdnsp_pci_probe(struct pci_dev *pdev,
@@ -230,6 +223,8 @@ static const struct pci_device_id cdnsp_pci_ids[] = {
          PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
        { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
          CDNS_DRD_IF, PCI_ANY_ID },
+       { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
+         CDNS_DRD_IF, PCI_ANY_ID },
        { 0, }
 };
 
index 005c67cb3afb72cadc171d73cc5c25887a7939b3..f210b7489fd5b8ff825905da38c87674ce5a87a0 100644 (file)
@@ -208,6 +208,7 @@ struct hw_bank {
  * @in_lpm: if the core in low power mode
  * @wakeup_int: if wakeup interrupt occur
  * @rev: The revision number for controller
+ * @mutex: protect code from concurrent running when doing role switch
  */
 struct ci_hdrc {
        struct device                   *dev;
@@ -260,6 +261,7 @@ struct ci_hdrc {
        bool                            in_lpm;
        bool                            wakeup_int;
        enum ci_revision                rev;
+       struct mutex                    mutex;
 };
 
 static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
index 27c601296130e23f5b3d740d2b2944ccb2bbe586..281fc51720cea2ab9d0d63b486fb38c6c4f4ebfc 100644 (file)
@@ -984,9 +984,16 @@ static ssize_t role_store(struct device *dev,
                             strlen(ci->roles[role]->name)))
                        break;
 
-       if (role == CI_ROLE_END || role == ci->role)
+       if (role == CI_ROLE_END)
                return -EINVAL;
 
+       mutex_lock(&ci->mutex);
+
+       if (role == ci->role) {
+               mutex_unlock(&ci->mutex);
+               return n;
+       }
+
        pm_runtime_get_sync(dev);
        disable_irq(ci->irq);
        ci_role_stop(ci);
@@ -995,6 +1002,7 @@ static ssize_t role_store(struct device *dev,
                ci_handle_vbus_change(ci);
        enable_irq(ci->irq);
        pm_runtime_put_sync(dev);
+       mutex_unlock(&ci->mutex);
 
        return (ret == 0) ? n : ret;
 }
@@ -1030,6 +1038,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        spin_lock_init(&ci->lock);
+       mutex_init(&ci->mutex);
        ci->dev = dev;
        ci->platdata = dev_get_platdata(dev);
        ci->imx28_write_fix = !!(ci->platdata->flags &
index 622c3b68aa1e6a4bf55370d42ab4d3768eb478d8..f5490f2a5b6bca6b725c596d7536c2b548547d4b 100644 (file)
@@ -167,8 +167,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
 
 void ci_handle_id_switch(struct ci_hdrc *ci)
 {
-       enum ci_role role = ci_otg_role(ci);
+       enum ci_role role;
 
+       mutex_lock(&ci->mutex);
+       role = ci_otg_role(ci);
        if (role != ci->role) {
                dev_dbg(ci->dev, "switching from %s to %s\n",
                        ci_role(ci)->name, ci->roles[role]->name);
@@ -198,6 +200,7 @@ void ci_handle_id_switch(struct ci_hdrc *ci)
                if (role == CI_ROLE_GADGET)
                        ci_handle_vbus_change(ci);
        }
+       mutex_unlock(&ci->mutex);
 }
 /**
  * ci_otg_work - perform otg (vbus/id) event handle
index d8d6493bc4576ba767d0c13b0255592cbece1595..a8605b02115b1c3d0c6038c1dbb72e0e9ab3bac9 100644 (file)
@@ -35,7 +35,8 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
 
        spin_unlock_irqrestore(&hsotg->lock, flags);
 
-       dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST));
+       dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
+                               (hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
 }
 
 static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
index 62fa6378d2d73c63365e22ef0ea916517372a589..8b15742d9e8aa03301c41a7192207b33dd1a7293 100644 (file)
@@ -4549,8 +4549,7 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
        hsotg->gadget.dev.of_node = hsotg->dev->of_node;
        hsotg->gadget.speed = USB_SPEED_UNKNOWN;
 
-       if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
-           (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) {
+       if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
                ret = dwc2_lowlevel_hw_enable(hsotg);
                if (ret)
                        goto err;
@@ -4612,8 +4611,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
        if (!IS_ERR_OR_NULL(hsotg->uphy))
                otg_set_peripheral(hsotg->uphy->otg, NULL);
 
-       if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
-           (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+       if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
                dwc2_lowlevel_hw_disable(hsotg);
 
        return 0;
index 23ef759968231a80bab5e78cdf3217d7771298fc..d1589ba7d322dc6ccf8427ab29c131059c4ee80a 100644 (file)
@@ -91,13 +91,6 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
        return 0;
 }
 
-static void __dwc2_disable_regulators(void *data)
-{
-       struct dwc2_hsotg *hsotg = data;
-
-       regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
-}
-
 static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
 {
        struct platform_device *pdev = to_platform_device(hsotg->dev);
@@ -108,11 +101,6 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
        if (ret)
                return ret;
 
-       ret = devm_add_action_or_reset(&pdev->dev,
-                                      __dwc2_disable_regulators, hsotg);
-       if (ret)
-               return ret;
-
        if (hsotg->clk) {
                ret = clk_prepare_enable(hsotg->clk);
                if (ret)
@@ -168,7 +156,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
        if (hsotg->clk)
                clk_disable_unprepare(hsotg->clk);
 
-       return 0;
+       return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
 }
 
 /**
@@ -576,8 +564,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
        dwc2_debugfs_init(hsotg);
 
        /* Gadget code manages lowlevel hw on its own */
-       if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
-           (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
+       if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
                dwc2_lowlevel_hw_disable(hsotg);
 
 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
@@ -608,7 +595,7 @@ error_init:
        if (hsotg->params.activate_stm_id_vb_detection)
                regulator_disable(hsotg->usb33d);
 error:
-       if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
+       if (hsotg->ll_hw_enabled)
                dwc2_lowlevel_hw_disable(hsotg);
        return retval;
 }
index 582ebd9cf9c2e1ed4867404203efe8c9a0e11254..4743e918dcafa96aea30b238ca24d44c36745448 100644 (file)
@@ -1098,7 +1098,7 @@ struct dwc3_scratchpad_array {
  *                     change quirk.
  * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
  *                     check during HS transmit.
- * @resume-hs-terminations: Set if we enable quirk for fixing improper crc
+ * @resume_hs_terminations: Set if we enable quirk for fixing improper crc
  *                     generation after resume from suspend.
  * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
  *                     instances in park mode.
index 3c63fa97a680041f7345b1f6aadddda4e6207a2b..cf5b4f49c3ed8241fdf34d5ebd9ae15d78c14e89 100644 (file)
@@ -1699,6 +1699,7 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
  */
 static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
 {
+       struct dwc3 *dwc = dep->dwc;
        struct dwc3_gadget_ep_cmd_params params;
        u32 cmd;
        int ret;
@@ -1722,10 +1723,13 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
        WARN_ON_ONCE(ret);
        dep->resource_index = 0;
 
-       if (!interrupt)
+       if (!interrupt) {
+               if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
+                       mdelay(1);
                dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
-       else if (!ret)
+       } else if (!ret) {
                dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+       }
 
        dep->flags &= ~DWC3_EP_DELAY_STOP;
        return ret;
@@ -3774,7 +3778,11 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
         * enabled, the EndTransfer command will have completed upon
         * returning from this function.
         *
-        * This mode is NOT available on the DWC_usb31 IP.
+        * This mode is NOT available on the DWC_usb31 IP.  In this
+        * case, if the IOC bit is not set, then delay by 1ms
+        * after issuing the EndTransfer command.  This allows for the
+        * controller to handle the command completely before DWC3
+        * remove requests attempts to unmap USB request buffers.
         */
 
        __dwc3_stop_active_transfer(dep, force, interrupt);
index fa7dd6cf014d7b093da3fea48aee1a6981e9157e..5377d873c08eb6fb57895e1fe127f8b0fa16b68e 100644 (file)
@@ -2079,10 +2079,9 @@ unknown:
                                sizeof(url_descriptor->URL)
                                - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset);
 
-                       if (ctrl->wLength < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH
-                                           + landing_page_length)
-                               landing_page_length = ctrl->wLength
-                                       - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
+                       if (w_length < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_length)
+                               landing_page_length = w_length
+                               - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
 
                        memcpy(url_descriptor->URL,
                                cdev->landing_page + landing_page_offset,
index c1f62e91b012634498bb7b299e59a018c28076d6..4a42574b4a7feb9ecc230889fe66f85e3c5ab8ea 100644 (file)
@@ -1422,7 +1422,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
        uac = g_audio->uac;
        card = uac->card;
        if (card)
-               snd_card_free(card);
+               snd_card_free_when_closed(card);
 
        kfree(uac->p_prm.reqs);
        kfree(uac->c_prm.reqs);
index 5402e4b7267b9957f0f6300099f20ccc50398944..12fc6eb67c3bf23a253ee1f48667499021d830d7 100644 (file)
@@ -410,6 +410,7 @@ static const struct usb_device_id onboard_hub_id_table[] = {
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
        { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
+       { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
        { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
        { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
        { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
index 0a943a1546490293c3342609dd960f71cf046b50..aca5f50eb0da77f471816219d1e0d37ad57589cc 100644 (file)
@@ -36,6 +36,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = {
 
 static const struct of_device_id onboard_hub_match[] = {
        { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+       { .compatible = "usb424,2517", .data = &microchip_usb424_data, },
        { .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
        { .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
        { .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
index c7b763d6d102394aba9080cfc104db9cb2b3c9c3..1f8c9b16a0fb850de066b2d1c071ff933fa4f4bf 100644 (file)
@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA),
 
+/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+               "JMicron",
+               "JMS583Gen 2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
 UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
                "PNY",
index a0d943d785800fbbdb36a2cc5f206b5841697d64..1ee774c263f08c06ea87f28dcca1a83ef614e279 100644 (file)
@@ -1445,10 +1445,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
                           const u32 *data, int cnt)
 {
+       u32 vdo_hdr = port->vdo_data[0];
+
        WARN_ON(!mutex_is_locked(&port->lock));
 
-       /* Make sure we are not still processing a previous VDM packet */
-       WARN_ON(port->vdm_state > VDM_STATE_DONE);
+       /* If is sending discover_identity, handle received message first */
+       if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
+               port->send_discover = true;
+               mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+       } else {
+               /* Make sure we are not still processing a previous VDM packet */
+               WARN_ON(port->vdm_state > VDM_STATE_DONE);
+       }
 
        port->vdo_count = cnt + 1;
        port->vdo_data[0] = header;
@@ -1948,11 +1956,13 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                        switch (PD_VDO_CMD(vdo_hdr)) {
                        case CMD_DISCOVER_IDENT:
                                res = tcpm_ams_start(port, DISCOVER_IDENTITY);
-                               if (res == 0)
+                               if (res == 0) {
                                        port->send_discover = false;
-                               else if (res == -EAGAIN)
+                               } else if (res == -EAGAIN) {
+                                       port->vdo_data[0] = 0;
                                        mod_send_discover_delayed_work(port,
                                                                       SEND_DISCOVER_RETRY_MS);
+                               }
                                break;
                        case CMD_DISCOVER_SVID:
                                res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -2035,6 +2045,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
                        unsigned long timeout;
 
                        port->vdm_retries = 0;
+                       port->vdo_data[0] = 0;
                        port->vdm_state = VDM_STATE_BUSY;
                        timeout = vdm_ready_timeout(vdo_hdr);
                        mod_vdm_delayed_work(port, timeout);
@@ -4570,6 +4581,9 @@ static void run_state_machine(struct tcpm_port *port)
        case SOFT_RESET:
                port->message_id = 0;
                port->rx_msgid = -1;
+               /* remove existing capabilities */
+               usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+               port->partner_source_caps = NULL;
                tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
                tcpm_ams_finish(port);
                if (port->pwr_role == TYPEC_SOURCE) {
@@ -4589,6 +4603,9 @@ static void run_state_machine(struct tcpm_port *port)
        case SOFT_RESET_SEND:
                port->message_id = 0;
                port->rx_msgid = -1;
+               /* remove existing capabilities */
+               usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+               port->partner_source_caps = NULL;
                if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
                        tcpm_set_state_cond(port, hard_reset_state(port), 0);
                else
@@ -4718,6 +4735,9 @@ static void run_state_machine(struct tcpm_port *port)
                tcpm_set_state(port, SNK_STARTUP, 0);
                break;
        case PR_SWAP_SNK_SRC_SINK_OFF:
+               /* will be source, remove existing capabilities */
+               usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+               port->partner_source_caps = NULL;
                /*
                 * Prevent vbus discharge circuit from turning on during PR_SWAP
                 * as this is not a disconnect.
index f632350f6dcb2ad43bf4faa6509ce1d917c654b4..8d1baf28df55c90e3f3bf2078ee6373134ddcfc6 100644 (file)
@@ -1125,12 +1125,11 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
        return NULL;
 }
 
-static int ucsi_register_port(struct ucsi *ucsi, int index)
+static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
 {
        struct usb_power_delivery_desc desc = { ucsi->cap.pd_version};
        struct usb_power_delivery_capabilities_desc pd_caps;
        struct usb_power_delivery_capabilities *pd_cap;
-       struct ucsi_connector *con = &ucsi->connector[index];
        struct typec_capability *cap = &con->typec_cap;
        enum typec_accessory *accessory = cap->accessory;
        enum usb_role u_role = USB_ROLE_NONE;
@@ -1151,7 +1150,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
        init_completion(&con->complete);
        mutex_init(&con->lock);
        INIT_LIST_HEAD(&con->partner_tasks);
-       con->num = index + 1;
        con->ucsi = ucsi;
 
        cap->fwnode = ucsi_find_fwnode(con);
@@ -1328,8 +1326,8 @@ out_unlock:
  */
 static int ucsi_init(struct ucsi *ucsi)
 {
-       struct ucsi_connector *con;
-       u64 command;
+       struct ucsi_connector *con, *connector;
+       u64 command, ntfy;
        int ret;
        int i;
 
@@ -1341,8 +1339,8 @@ static int ucsi_init(struct ucsi *ucsi)
        }
 
        /* Enable basic notifications */
-       ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
-       command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+       ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+       command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
        ret = ucsi_send_command(ucsi, command, NULL, 0);
        if (ret < 0)
                goto err_reset;
@@ -1359,31 +1357,33 @@ static int ucsi_init(struct ucsi *ucsi)
        }
 
        /* Allocate the connectors. Released in ucsi_unregister() */
-       ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
-                                 sizeof(*ucsi->connector), GFP_KERNEL);
-       if (!ucsi->connector) {
+       connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
+       if (!connector) {
                ret = -ENOMEM;
                goto err_reset;
        }
 
        /* Register all connectors */
        for (i = 0; i < ucsi->cap.num_connectors; i++) {
-               ret = ucsi_register_port(ucsi, i);
+               connector[i].num = i + 1;
+               ret = ucsi_register_port(ucsi, &connector[i]);
                if (ret)
                        goto err_unregister;
        }
 
        /* Enable all notifications */
-       ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
-       command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+       ntfy = UCSI_ENABLE_NTFY_ALL;
+       command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
        ret = ucsi_send_command(ucsi, command, NULL, 0);
        if (ret < 0)
                goto err_unregister;
 
+       ucsi->connector = connector;
+       ucsi->ntfy = ntfy;
        return 0;
 
 err_unregister:
-       for (con = ucsi->connector; con->port; con++) {
+       for (con = connector; con->port; con++) {
                ucsi_unregister_partner(con);
                ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
                ucsi_unregister_port_psy(con);
@@ -1399,10 +1399,7 @@ err_unregister:
                typec_unregister_port(con->port);
                con->port = NULL;
        }
-
-       kfree(ucsi->connector);
-       ucsi->connector = NULL;
-
+       kfree(connector);
 err_reset:
        memset(&ucsi->cap, 0, sizeof(ucsi->cap));
        ucsi_reset_ppm(ucsi);
index ce0c8ef80c04343d2d674579283cddfc567c3e85..62206a6b8ea75019e1ec767146a1a1d35e5e5b90 100644 (file)
@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
        if (ret)
                goto out_clear_bit;
 
-       if (!wait_for_completion_timeout(&ua->complete, HZ))
+       if (!wait_for_completion_timeout(&ua->complete, 5 * HZ))
                ret = -ETIMEDOUT;
 
 out_clear_bit:
index 058fbe28107e9e2740c698ad862a1b1b747c8cc1..25fc4120b618de16483ae2bd89af9bb866206bcd 100644 (file)
@@ -96,6 +96,7 @@ struct mlx5_vdpa_dev {
        struct mlx5_control_vq cvq;
        struct workqueue_struct *wq;
        unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
+       bool suspended;
 };
 
 int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
index 3a0e721aef05fb419ca84c604ed29c79f8e84da8..520646ae7fa013b2612c8ec240f4585fab669f92 100644 (file)
@@ -2438,7 +2438,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
        if (err)
                goto err_mr;
 
-       if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+       if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
                goto err_mr;
 
        restore_channels_info(ndev);
@@ -2606,6 +2606,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
        clear_vqs_ready(ndev);
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
        ndev->mvdev.status = 0;
+       ndev->mvdev.suspended = false;
        ndev->cur_num_vqs = 0;
        ndev->mvdev.cvq.received_desc = 0;
        ndev->mvdev.cvq.completed_desc = 0;
@@ -2852,6 +2853,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
        struct mlx5_vdpa_virtqueue *mvq;
        int i;
 
+       mlx5_vdpa_info(mvdev, "suspending device\n");
+
        down_write(&ndev->reslock);
        ndev->nb_registered = false;
        mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
@@ -2861,6 +2864,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
                suspend_vq(ndev, mvq);
        }
        mlx5_vdpa_cvq_suspend(mvdev);
+       mvdev->suspended = true;
        up_write(&ndev->reslock);
        return 0;
 }
index 6a0a658146269a5d1c218d88747f4a6186060869..eea23c630f7c08d3abc33ef463529bde775e64b8 100644 (file)
@@ -68,6 +68,17 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
                          (uintptr_t)vq->device_addr);
 
        vq->vring.last_avail_idx = last_avail_idx;
+
+       /*
+        * Since vdpa_sim does not support receive inflight descriptors as a
+        * destination of a migration, let's set both avail_idx and used_idx
+        * the same at vq start.  This is how vhost-user works in a
+        * VHOST_SET_VRING_BASE call.
+        *
+        * Although the simple fix is to set last_used_idx at
+        * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
+        */
+       vq->vring.last_used_idx = last_avail_idx;
        vq->vring.notify = vdpasim_vq_notify;
 }
 
index 8fe267ca3e76f2483501030aa838b9773321bac6..281287fae89f137e18e09d5c259115f3af1fcb4f 100644 (file)
@@ -645,8 +645,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
        struct virtio_pci_modern_device *mdev = NULL;
 
        mdev = vp_vdpa_mgtdev->mdev;
-       vp_modern_remove(mdev);
        vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
+       vp_modern_remove(mdev);
        kfree(vp_vdpa_mgtdev->mgtdev.id_table);
        kfree(mdev);
        kfree(vp_vdpa_mgtdev);
index e897537a9e8ad7e26238eee6c8bd90ab3fbaf6b6..d95fd382814c8f28ba4db8d78170e97441992cc9 100644 (file)
@@ -442,16 +442,10 @@ static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd,
        if (migf->pre_copy_initial_bytes > *pos) {
                info.initial_bytes = migf->pre_copy_initial_bytes - *pos;
        } else {
-               buf = mlx5vf_get_data_buff_from_pos(migf, *pos, &end_of_data);
-               if (buf) {
-                       info.dirty_bytes = buf->start_pos + buf->length - *pos;
-               } else {
-                       if (!end_of_data) {
-                               ret = -EINVAL;
-                               goto err_migf_unlock;
-                       }
-                       info.dirty_bytes = inc_length;
-               }
+               info.dirty_bytes = migf->max_pos - *pos;
+               if (!info.dirty_bytes)
+                       end_of_data = true;
+               info.dirty_bytes += inc_length;
        }
 
        if (!end_of_data || !inc_length) {
index dc12dbd5b43ba6045078e621afd035e0b7946e8c..7be9d9d8f01c819d60e309ec1d6a83e46e03bf21 100644 (file)
@@ -1169,6 +1169,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
 
 err_attach:
        iommu_domain_free(v->domain);
+       v->domain = NULL;
        return ret;
 }
 
@@ -1213,6 +1214,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
                        vhost_vdpa_remove_as(v, asid);
        }
 
+       vhost_vdpa_free_domain(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
 }
@@ -1285,7 +1287,6 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
        vhost_vdpa_clean_irq(v);
        vhost_vdpa_reset(v);
        vhost_dev_stop(&v->vdev);
-       vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
        vhost_vdpa_cleanup(v);
        mutex_unlock(&d->mutex);
index f65c96d1394d3257a93dfc1b1ad82bb3432cf4ed..e45338227be6e03e102b2cf06a95d5afffa21d13 100644 (file)
@@ -854,7 +854,7 @@ static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
        board->caps = CLCD_CAP_ALL;
        board->check = clcdfb_check;
        board->decode = clcdfb_decode;
-       if (of_find_property(node, "memory-region", NULL)) {
+       if (of_property_present(node, "memory-region")) {
                board->setup = clcdfb_of_vram_setup;
                board->mmap = clcdfb_of_vram_mmap;
                board->remove = clcdfb_of_vram_remove;
index 81c3154544287763a30ff522e05bdc6305e51f21..b6b22fa4a8a01360095c3549dc63800f6dd0ddf9 100644 (file)
@@ -1040,6 +1040,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
        u32 pixclock;
        int screen_size, plane;
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        plane = fbdev->plane;
 
        /* Make sure that the mode respect all LCD controller and
index 6403ae07970d6cbd7110f7b781627d628e69fd83..9cbadcd18b256c4ff0e525f7d09867d377d1028f 100644 (file)
@@ -306,7 +306,7 @@ static int bw2_probe(struct platform_device *op)
        if (!par->regs)
                goto out_release_fb;
 
-       if (!of_find_property(dp, "width", NULL)) {
+       if (!of_property_present(dp, "width")) {
                err = bw2_do_default_mode(par, info, &linebytes);
                if (err)
                        goto out_unmap_regs;
index bdcc3f6ab6665b6b31984f0e05b0c0bf4e594695..3a37fff4df3664df0ddf608f2908298a7bd3aa84 100644 (file)
@@ -393,7 +393,7 @@ static int cg3_probe(struct platform_device *op)
 
        cg3_blank(FB_BLANK_UNBLANK, info);
 
-       if (!of_find_property(dp, "width", NULL)) {
+       if (!of_property_present(dp, "width")) {
                err = cg3_do_default_mode(par);
                if (err)
                        goto out_unmap_screen;
index cc37ec3f8fc1f466af20402ff1254f1e780fcaff..7799d52a651f3273a8280f36e1c9174d15c50d52 100644 (file)
@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
-       if (pci_enable_device(dp) < 0) {
+       rc = pci_enable_device(dp);
+       if (rc < 0) {
                dev_err(&dp->dev, "Cannot enable PCI device\n");
                goto err_out;
        }
 
-       if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+       if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
+               rc = -ENODEV;
                goto err_disable;
+       }
        addr = pci_resource_start(dp, 0);
-       if (addr == 0)
+       if (addr == 0) {
+               rc = -ENODEV;
                goto err_disable;
+       }
 
        p = framebuffer_alloc(0, &dp->dev);
        if (p == NULL) {
@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
 
        init_chips(p, addr);
 
-       if (register_framebuffer(p) < 0) {
+       rc = register_framebuffer(p);
+       if (rc < 0) {
                dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
                goto err_unmap;
        }
index 45c75ff01eca4d938a496f0d37cc46d165aa717e..c8bfc608bd9c11ea61d48d3373abaa61e669d5b4 100644 (file)
@@ -238,8 +238,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
        info->fix.mmio_start = res->start;
        info->fix.mmio_len = resource_size(res);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       info->screen_base = devm_ioremap_resource(dev, res);
+       info->screen_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
        if (IS_ERR(info->screen_base)) {
                ret = PTR_ERR(info->screen_base);
                goto out_fb_release;
index aa5f059d022271c3ffe879edbd2d5027b51c710a..274f5d0fa24714ab31a7a6fcb1aa1667bda78c8a 100644 (file)
@@ -305,17 +305,18 @@ void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
 {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+
        file->f_mapping->a_ops = &fb_deferred_io_aops;
+       fbdefio->open_count++;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 
-void fb_deferred_io_release(struct fb_info *info)
+static void fb_deferred_io_lastclose(struct fb_info *info)
 {
-       struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;
 
-       BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);
 
        /* clear out the mapping that we setup */
@@ -324,13 +325,21 @@ void fb_deferred_io_release(struct fb_info *info)
                page->mapping = NULL;
        }
 }
+
+void fb_deferred_io_release(struct fb_info *info)
+{
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+
+       if (!--fbdefio->open_count)
+               fb_deferred_io_lastclose(info);
+}
 EXPORT_SYMBOL_GPL(fb_deferred_io_release);
 
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
 
-       fb_deferred_io_release(info);
+       fb_deferred_io_lastclose(info);
 
        kvfree(info->pagerefs);
        mutex_destroy(&fbdefio->lock);
index 8130e9eee2b4b4151a202358d0ce9d69964530de..556d8b1a9e06aefd6ec34796d9a60963c733f956 100644 (file)
@@ -235,6 +235,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
 
 static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
+       if (!var->pixclock)
+               return -EINVAL;
+
        if (var->xres > 1920 || var->yres > 1440)
                return -EINVAL;
 
index 0a9e5067b201083be2b74654b7c60ea89b575d43..a81095b2b1ea592e6eaabc03f0f880d459e9e1fd 100644 (file)
@@ -1222,6 +1222,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
 
        dinfo = GET_DINFO(info);
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        /* update the pitch */
        if (intelfbhw_validate_mode(dinfo, var) != 0)
                return -EINVAL;
index e60a276b4855dcddd06a6ffbe46220b161bba7d5..ea4ba3dfb96bb51d149c0e288286269bc1a1ad0f 100644 (file)
@@ -764,6 +764,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
        int pitch, err = 0;
 
        NVTRACE_ENTER();
+       if (!var->pixclock)
+               return -EINVAL;
 
        var->transp.offset = 0;
        var->transp.length = 0;
index f7ad6bc9d02d718e3c10818000554005f3978e49..b97d251d894b7a9100f76b1f0eff7d2f20694e69 100644 (file)
@@ -549,10 +549,10 @@ static void offb_init_nodriver(struct platform_device *parent, struct device_nod
        int foreign_endian = 0;
 
 #ifdef __BIG_ENDIAN
-       if (of_get_property(dp, "little-endian", NULL))
+       if (of_property_read_bool(dp, "little-endian"))
                foreign_endian = FBINFO_FOREIGN_ENDIAN;
 #else
-       if (of_get_property(dp, "big-endian", NULL))
+       if (of_property_read_bool(dp, "big-endian"))
                foreign_endian = FBINFO_FOREIGN_ENDIAN;
 #endif
 
index 504edb9c09dd832dbe097581f903196ddc668faf..6d5082c76919d9ba26e168c1c2ef99622e143e7d 100644 (file)
@@ -18,7 +18,6 @@ objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
 
 lcds-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o
 lcds-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
-lcds-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
 
 lcds-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
 
diff --git a/drivers/video/fbdev/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c
deleted file mode 100644 (file)
index 8168ba0..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for the TI OMAP OSK board
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- * Adapted for OSK by <dirk.behme@de.bosch.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <linux/soc/ti/omap1-io.h>
-#include <linux/soc/ti/omap1-mux.h>
-
-#include "omapfb.h"
-
-static int osk_panel_enable(struct lcd_panel *panel)
-{
-       /* configure PWL pin */
-       omap_cfg_reg(PWL);
-
-       /* Enable PWL unit */
-       omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);
-
-       /* Set PWL level */
-       omap_writeb(0xFF, OMAP_PWL_ENABLE);
-
-       /* set GPIO2 high (lcd power enabled) */
-       gpio_set_value(2, 1);
-
-       return 0;
-}
-
-static void osk_panel_disable(struct lcd_panel *panel)
-{
-       /* Set PWL level to zero */
-       omap_writeb(0x00, OMAP_PWL_ENABLE);
-
-       /* Disable PWL unit */
-       omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);
-
-       /* set GPIO2 low */
-       gpio_set_value(2, 0);
-}
-
-static struct lcd_panel osk_panel = {
-       .name           = "osk",
-       .config         = OMAP_LCDC_PANEL_TFT,
-
-       .bpp            = 16,
-       .data_lines     = 16,
-       .x_res          = 240,
-       .y_res          = 320,
-       .pixel_clock    = 12500,
-       .hsw            = 40,
-       .hfp            = 40,
-       .hbp            = 72,
-       .vsw            = 1,
-       .vfp            = 1,
-       .vbp            = 0,
-       .pcd            = 12,
-
-       .enable         = osk_panel_enable,
-       .disable        = osk_panel_disable,
-};
-
-static int osk_panel_probe(struct platform_device *pdev)
-{
-       omapfb_register_panel(&osk_panel);
-       return 0;
-}
-
-static struct platform_driver osk_panel_driver = {
-       .probe          = osk_panel_probe,
-       .driver         = {
-               .name   = "lcd_osk",
-       },
-};
-
-module_platform_driver(osk_panel_driver);
-
-MODULE_AUTHOR("Imre Deak");
-MODULE_DESCRIPTION("LCD panel support for the TI OMAP OSK board");
-MODULE_LICENSE("GPL");
index 1f3df2055ff0d5670cc8b1b79a25937413f79a88..18736079843dca4b6fd92a4b46b9db7388abfa41 100644 (file)
@@ -544,19 +544,25 @@ static int set_fb_var(struct fb_info *fbi,
                var->yoffset = var->yres_virtual - var->yres;
 
        if (plane->color_mode == OMAPFB_COLOR_RGB444) {
-               var->red.offset   = 8; var->red.length   = 4;
-                                               var->red.msb_right   = 0;
-               var->green.offset = 4; var->green.length = 4;
-                                               var->green.msb_right = 0;
-               var->blue.offset  = 0; var->blue.length  = 4;
-                                               var->blue.msb_right  = 0;
+               var->red.offset         = 8;
+               var->red.length         = 4;
+               var->red.msb_right      = 0;
+               var->green.offset       = 4;
+               var->green.length       = 4;
+               var->green.msb_right    = 0;
+               var->blue.offset        = 0;
+               var->blue.length        = 4;
+               var->blue.msb_right     = 0;
        } else {
-               var->red.offset  = 11; var->red.length   = 5;
-                                               var->red.msb_right   = 0;
-               var->green.offset = 5;  var->green.length = 6;
-                                               var->green.msb_right = 0;
-               var->blue.offset = 0;  var->blue.length  = 5;
-                                               var->blue.msb_right  = 0;
+               var->red.offset         = 11;
+               var->red.length         = 5;
+               var->red.msb_right      = 0;
+               var->green.offset       = 5;
+               var->green.length       = 6;
+               var->green.msb_right    = 0;
+               var->blue.offset        = 0;
+               var->blue.length        = 5;
+               var->blue.msb_right     = 0;
        }
 
        var->height             = -1;
index 0ae0cab252d3d0460e8b713f37496a3235872357..09f719af0d0c914c9b62b52b3549fb2b98037101 100644 (file)
@@ -192,7 +192,7 @@ static int __init omapdss_boot_init(void)
        omapdss_walk_device(dss, true);
 
        for_each_available_child_of_node(dss, child) {
-               if (!of_find_property(child, "compatible", NULL))
+               if (!of_property_present(child, "compatible"))
                        continue;
 
                omapdss_walk_device(child, true);
index c3cd1e1cc01b497fe2eaf8b0d5f368790a4e76a9..d16729215423d0909620d72b39b036452304743b 100644 (file)
@@ -599,8 +599,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
        priv->misc_dev.fops     = &pxa3xx_gcu_miscdev_fops;
 
        /* handle IO resources */
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->mmio_base = devm_ioremap_resource(dev, r);
+       priv->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
        if (IS_ERR(priv->mmio_base))
                return PTR_ERR(priv->mmio_base);
 
index f743bfbde2a6cb02e98fde4bbee27ddb2cf7b452..1f3cbe723def139e1f5cea1614d6e09c72c82028 100644 (file)
@@ -1737,10 +1737,10 @@ static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
 
 #if defined(CONFIG_OF)
 #ifdef __BIG_ENDIAN
-       if (of_get_property(info->dev->parent->of_node, "little-endian", NULL))
+       if (of_property_read_bool(info->dev->parent->of_node, "little-endian"))
                fb->flags |= FBINFO_FOREIGN_ENDIAN;
 #else
-       if (of_get_property(info->dev->parent->of_node, "big-endian", NULL))
+       if (of_property_read_bool(info->dev->parent->of_node, "big-endian"))
                fb->flags |= FBINFO_FOREIGN_ENDIAN;
 #endif
 #endif
index 3feb6e40d56d8c8db1ba4699dde09465050e1ec8..ef8a4c5fc6875c55e180eea152e84e72c55c39d0 100644 (file)
@@ -921,6 +921,28 @@ SETUP_HCRX(struct stifb_info *fb)
 
 /* ------------------- driver specific functions --------------------------- */
 
+static int
+stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct stifb_info *fb = container_of(info, struct stifb_info, info);
+
+       if (var->xres != fb->info.var.xres ||
+           var->yres != fb->info.var.yres ||
+           var->bits_per_pixel != fb->info.var.bits_per_pixel)
+               return -EINVAL;
+
+       var->xres_virtual = var->xres;
+       var->yres_virtual = var->yres;
+       var->xoffset = 0;
+       var->yoffset = 0;
+       var->grayscale = fb->info.var.grayscale;
+       var->red.length = fb->info.var.red.length;
+       var->green.length = fb->info.var.green.length;
+       var->blue.length = fb->info.var.blue.length;
+
+       return 0;
+}
+
 static int
 stifb_setcolreg(u_int regno, u_int red, u_int green,
              u_int blue, u_int transp, struct fb_info *info)
@@ -1145,6 +1167,7 @@ stifb_init_display(struct stifb_info *fb)
 
 static const struct fb_ops stifb_ops = {
        .owner          = THIS_MODULE,
+       .fb_check_var   = stifb_check_var,
        .fb_setcolreg   = stifb_setcolreg,
        .fb_blank       = stifb_blank,
        .fb_fillrect    = stifb_fillrect,
@@ -1164,6 +1187,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
        struct stifb_info *fb;
        struct fb_info *info;
        unsigned long sti_rom_address;
+       char modestr[32];
        char *dev_name;
        int bpp, xres, yres;
 
@@ -1342,6 +1366,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
        info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
        info->pseudo_palette = &fb->pseudo_palette;
 
+       scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp);
+       fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp);
+
        /* This has to be done !!! */
        if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0))
                goto out_err1;
index 01d87f53324d985452afb511f343d16ab67ec0dd..f2eaf6e7fff604de0a191939d53120a182e5c3b5 100644 (file)
@@ -379,8 +379,7 @@ static int tcx_probe(struct platform_device *op)
 
        spin_lock_init(&par->lock);
 
-       par->lowdepth =
-               (of_find_property(dp, "tcx-8-bit", NULL) != NULL);
+       par->lowdepth = of_property_read_bool(dp, "tcx-8-bit");
 
        sbusfb_fill_var(&info->var, dp, 8);
        info->var.red.length = 8;
index 14d37c49633c6b36819bda002333b4d63b199795..b44004880f0d1e25119ab9028aafc9acd0c61177 100644 (file)
@@ -173,6 +173,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
        struct tga_par *par = (struct tga_par *)info->par;
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        if (par->tga_type == TGA_TYPE_8PLANE) {
                if (var->bits_per_pixel != 8)
                        return -EINVAL;
index 8f4d674fa0d03892ea22bf3ecb8bd8f355890b03..96a6f7623e197396ed7231215df7df3d9ff1d8e7 100644 (file)
@@ -261,7 +261,6 @@ static const struct fb_ops wm8505fb_ops = {
 static int wm8505fb_probe(struct platform_device *pdev)
 {
        struct wm8505fb_info    *fbi;
-       struct resource *res;
        struct display_timings *disp_timing;
        void                    *addr;
        int ret;
@@ -299,8 +298,7 @@ static int wm8505fb_probe(struct platform_device *pdev)
        addr = addr + sizeof(struct wm8505fb_info);
        fbi->fb.pseudo_palette  = addr;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       fbi->regbase = devm_ioremap_resource(&pdev->dev, res);
+       fbi->regbase = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(fbi->regbase))
                return PTR_ERR(fbi->regbase);
 
index 1ac83900a21ccef301d2a0557549b610bbe46127..7911354827dc25042720884f408c11a98ae373bd 100644 (file)
@@ -273,8 +273,7 @@ static int xilinxfb_assign(struct platform_device *pdev,
        if (drvdata->flags & BUS_ACCESS_FLAG) {
                struct resource *res;
 
-               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
+               drvdata->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
                if (IS_ERR(drvdata->regs))
                        return PTR_ERR(drvdata->regs);
 
@@ -469,8 +468,7 @@ static int xilinxfb_of_probe(struct platform_device *pdev)
                pdata.yvirt = prop[1];
        }
 
-       if (of_find_property(pdev->dev.of_node, "rotate-display", NULL))
-               pdata.rotate_screen = 1;
+       pdata.rotate_screen = of_property_read_bool(pdev->dev.of_node, "rotate-display");
 
        platform_set_drvdata(pdev, drvdata);
        return xilinxfb_assign(pdev, drvdata, &pdata);
index 4718d7895f0b451e4a529a4e1fcce6a85ec5a4c5..ada5ef6e51b7a9f61159101b39e5c299b010e61a 100644 (file)
@@ -1,15 +1,9 @@
-
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  Convert a logo in ASCII PNM format to C source suitable for inclusion in
  *  the Linux kernel
  *
  *  (C) Copyright 2001-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
- *
- *  --------------------------------------------------------------------------
- *
- *  This file is subject to the terms and conditions of the GNU General Public
- *  License. See the file COPYING in the main directory of the Linux
- *  distribution for more details.
  */
 
 #include <ctype.h>
@@ -34,37 +28,37 @@ static FILE *out;
 #define LINUX_LOGO_GRAY256     4       /* 256 levels grayscale */
 
 static const char *logo_types[LINUX_LOGO_GRAY256+1] = {
-    [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
-    [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
-    [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
-    [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
+       [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO",
+       [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16",
+       [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224",
+       [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256"
 };
 
 #define MAX_LINUX_LOGO_COLORS  224
 
 struct color {
-    unsigned char red;
-    unsigned char green;
-    unsigned char blue;
+       unsigned char red;
+       unsigned char green;
+       unsigned char blue;
 };
 
 static const struct color clut_vga16[16] = {
-    { 0x00, 0x00, 0x00 },
-    { 0x00, 0x00, 0xaa },
-    { 0x00, 0xaa, 0x00 },
-    { 0x00, 0xaa, 0xaa },
-    { 0xaa, 0x00, 0x00 },
-    { 0xaa, 0x00, 0xaa },
-    { 0xaa, 0x55, 0x00 },
-    { 0xaa, 0xaa, 0xaa },
-    { 0x55, 0x55, 0x55 },
-    { 0x55, 0x55, 0xff },
-    { 0x55, 0xff, 0x55 },
-    { 0x55, 0xff, 0xff },
-    { 0xff, 0x55, 0x55 },
-    { 0xff, 0x55, 0xff },
-    { 0xff, 0xff, 0x55 },
-    { 0xff, 0xff, 0xff },
+       { 0x00, 0x00, 0x00 },
+       { 0x00, 0x00, 0xaa },
+       { 0x00, 0xaa, 0x00 },
+       { 0x00, 0xaa, 0xaa },
+       { 0xaa, 0x00, 0x00 },
+       { 0xaa, 0x00, 0xaa },
+       { 0xaa, 0x55, 0x00 },
+       { 0xaa, 0xaa, 0xaa },
+       { 0x55, 0x55, 0x55 },
+       { 0x55, 0x55, 0xff },
+       { 0x55, 0xff, 0x55 },
+       { 0x55, 0xff, 0xff },
+       { 0xff, 0x55, 0x55 },
+       { 0xff, 0x55, 0xff },
+       { 0xff, 0xff, 0x55 },
+       { 0xff, 0xff, 0xff },
 };
 
 
@@ -77,438 +71,440 @@ static unsigned int logo_clutsize;
 static int is_plain_pbm = 0;
 
 static void die(const char *fmt, ...)
-    __attribute__ ((noreturn)) __attribute ((format (printf, 1, 2)));
-static void usage(void) __attribute ((noreturn));
+__attribute__((noreturn)) __attribute((format (printf, 1, 2)));
+static void usage(void) __attribute((noreturn));
 
 
 static unsigned int get_number(FILE *fp)
 {
-    int c, val;
-
-    /* Skip leading whitespace */
-    do {
-       c = fgetc(fp);
-       if (c == EOF)
-           die("%s: end of file\n", filename);
-       if (c == '#') {
-           /* Ignore comments 'till end of line */
-           do {
+       int c, val;
+
+       /* Skip leading whitespace */
+       do {
                c = fgetc(fp);
                if (c == EOF)
-                   die("%s: end of file\n", filename);
-           } while (c != '\n');
+                       die("%s: end of file\n", filename);
+               if (c == '#') {
+                       /* Ignore comments 'till end of line */
+                       do {
+                               c = fgetc(fp);
+                               if (c == EOF)
+                                       die("%s: end of file\n", filename);
+                       } while (c != '\n');
+               }
+       } while (isspace(c));
+
+       /* Parse decimal number */
+       val = 0;
+       while (isdigit(c)) {
+               val = 10*val+c-'0';
+               /* Some PBM are 'broken'; GIMP for example exports a PBM
+                * without space between the digits. This is OK because we
+                * know a PBM can only have a '1' or a '0' for the digit.
+                */
+               if (is_plain_pbm)
+                       break;
+               c = fgetc(fp);
+               if (c == EOF)
+                       die("%s: end of file\n", filename);
        }
-    } while (isspace(c));
-
-    /* Parse decimal number */
-    val = 0;
-    while (isdigit(c)) {
-       val = 10*val+c-'0';
-       /* some PBM are 'broken'; GiMP for example exports a PBM without space
-        * between the digits. This is Ok cause we know a PBM can only have a '1'
-        * or a '0' for the digit. */
-       if (is_plain_pbm)
-               break;
-       c = fgetc(fp);
-       if (c == EOF)
-           die("%s: end of file\n", filename);
-    }
-    return val;
+       return val;
 }
 
 static unsigned int get_number255(FILE *fp, unsigned int maxval)
 {
-    unsigned int val = get_number(fp);
-    return (255*val+maxval/2)/maxval;
+       unsigned int val = get_number(fp);
+
+       return (255*val+maxval/2)/maxval;
 }
 
 static void read_image(void)
 {
-    FILE *fp;
-    unsigned int i, j;
-    int magic;
-    unsigned int maxval;
-
-    /* open image file */
-    fp = fopen(filename, "r");
-    if (!fp)
-       die("Cannot open file %s: %s\n", filename, strerror(errno));
-
-    /* check file type and read file header */
-    magic = fgetc(fp);
-    if (magic != 'P')
-       die("%s is not a PNM file\n", filename);
-    magic = fgetc(fp);
-    switch (magic) {
+       FILE *fp;
+       unsigned int i, j;
+       int magic;
+       unsigned int maxval;
+
+       /* open image file */
+       fp = fopen(filename, "r");
+       if (!fp)
+               die("Cannot open file %s: %s\n", filename, strerror(errno));
+
+       /* check file type and read file header */
+       magic = fgetc(fp);
+       if (magic != 'P')
+               die("%s is not a PNM file\n", filename);
+       magic = fgetc(fp);
+       switch (magic) {
        case '1':
        case '2':
        case '3':
-           /* Plain PBM/PGM/PPM */
-           break;
+               /* Plain PBM/PGM/PPM */
+               break;
 
        case '4':
        case '5':
        case '6':
-           /* Binary PBM/PGM/PPM */
-           die("%s: Binary PNM is not supported\n"
+               /* Binary PBM/PGM/PPM */
+               die("%s: Binary PNM is not supported\n"
                "Use pnmnoraw(1) to convert it to ASCII PNM\n", filename);
 
        default:
-           die("%s is not a PNM file\n", filename);
-    }
-    logo_width = get_number(fp);
-    logo_height = get_number(fp);
-
-    /* allocate image data */
-    logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
-    if (!logo_data)
-       die("%s\n", strerror(errno));
-    for (i = 0; i < logo_height; i++) {
-       logo_data[i] = malloc(logo_width*sizeof(struct color));
+               die("%s is not a PNM file\n", filename);
+       }
+       logo_width = get_number(fp);
+       logo_height = get_number(fp);
+
+       /* allocate image data */
+       logo_data = (struct color **)malloc(logo_height*sizeof(struct color *));
+       if (!logo_data)
+               die("%s\n", strerror(errno));
+       for (i = 0; i < logo_height; i++) {
+               logo_data[i] = malloc(logo_width*sizeof(struct color));
        if (!logo_data[i])
-           die("%s\n", strerror(errno));
-    }
+               die("%s\n", strerror(errno));
+       }
 
-    /* read image data */
-    switch (magic) {
+       /* read image data */
+       switch (magic) {
        case '1':
-           /* Plain PBM */
-           is_plain_pbm = 1;
-           for (i = 0; i < logo_height; i++)
-               for (j = 0; j < logo_width; j++)
-                   logo_data[i][j].red = logo_data[i][j].green =
-                       logo_data[i][j].blue = 255*(1-get_number(fp));
-           break;
+               /* Plain PBM */
+               is_plain_pbm = 1;
+               for (i = 0; i < logo_height; i++)
+                       for (j = 0; j < logo_width; j++)
+                               logo_data[i][j].red = logo_data[i][j].green =
+                                       logo_data[i][j].blue = 255*(1-get_number(fp));
+               break;
 
        case '2':
-           /* Plain PGM */
-           maxval = get_number(fp);
-           for (i = 0; i < logo_height; i++)
-               for (j = 0; j < logo_width; j++)
-                   logo_data[i][j].red = logo_data[i][j].green =
-                       logo_data[i][j].blue = get_number255(fp, maxval);
-           break;
+               /* Plain PGM */
+               maxval = get_number(fp);
+               for (i = 0; i < logo_height; i++)
+                       for (j = 0; j < logo_width; j++)
+                               logo_data[i][j].red = logo_data[i][j].green =
+                                       logo_data[i][j].blue = get_number255(fp, maxval);
+               break;
 
        case '3':
-           /* Plain PPM */
-           maxval = get_number(fp);
-           for (i = 0; i < logo_height; i++)
-               for (j = 0; j < logo_width; j++) {
-                   logo_data[i][j].red = get_number255(fp, maxval);
-                   logo_data[i][j].green = get_number255(fp, maxval);
-                   logo_data[i][j].blue = get_number255(fp, maxval);
-               }
-           break;
-    }
+               /* Plain PPM */
+               maxval = get_number(fp);
+               for (i = 0; i < logo_height; i++)
+                       for (j = 0; j < logo_width; j++) {
+                               logo_data[i][j].red = get_number255(fp, maxval);
+                               logo_data[i][j].green = get_number255(fp, maxval);
+                               logo_data[i][j].blue = get_number255(fp, maxval);
+                       }
+               break;
+       }
 
-    /* close file */
-    fclose(fp);
+       /* close file */
+       fclose(fp);
 }
 
 static inline int is_black(struct color c)
 {
-    return c.red == 0 && c.green == 0 && c.blue == 0;
+       return c.red == 0 && c.green == 0 && c.blue == 0;
 }
 
 static inline int is_white(struct color c)
 {
-    return c.red == 255 && c.green == 255 && c.blue == 255;
+       return c.red == 255 && c.green == 255 && c.blue == 255;
 }
 
 static inline int is_gray(struct color c)
 {
-    return c.red == c.green && c.red == c.blue;
+       return c.red == c.green && c.red == c.blue;
 }
 
 static inline int is_equal(struct color c1, struct color c2)
 {
-    return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
+       return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue;
 }
 
 static void write_header(void)
 {
-    /* open logo file */
-    if (outputname) {
-       out = fopen(outputname, "w");
-       if (!out)
-           die("Cannot create file %s: %s\n", outputname, strerror(errno));
-    } else {
-       out = stdout;
-    }
-
-    fputs("/*\n", out);
-    fputs(" *  DO NOT EDIT THIS FILE!\n", out);
-    fputs(" *\n", out);
-    fprintf(out, " *  It was automatically generated from %s\n", filename);
-    fputs(" *\n", out);
-    fprintf(out, " *  Linux logo %s\n", logoname);
-    fputs(" */\n\n", out);
-    fputs("#include <linux/linux_logo.h>\n\n", out);
-    fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
-           logoname);
+       /* open logo file */
+       if (outputname) {
+               out = fopen(outputname, "w");
+               if (!out)
+                       die("Cannot create file %s: %s\n", outputname, strerror(errno));
+       } else {
+               out = stdout;
+       }
+
+       fputs("/*\n", out);
+       fputs(" *  DO NOT EDIT THIS FILE!\n", out);
+       fputs(" *\n", out);
+       fprintf(out, " *  It was automatically generated from %s\n", filename);
+       fputs(" *\n", out);
+       fprintf(out, " *  Linux logo %s\n", logoname);
+       fputs(" */\n\n", out);
+       fputs("#include <linux/linux_logo.h>\n\n", out);
+       fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+               logoname);
 }
 
 static void write_footer(void)
 {
-    fputs("\n};\n\n", out);
-    fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
-    fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
-    fprintf(out, "\t.width\t\t= %d,\n", logo_width);
-    fprintf(out, "\t.height\t\t= %d,\n", logo_height);
-    if (logo_type == LINUX_LOGO_CLUT224) {
-       fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
-       fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
-    }
-    fprintf(out, "\t.data\t\t= %s_data\n", logoname);
-    fputs("};\n\n", out);
-
-    /* close logo file */
-    if (outputname)
-       fclose(out);
+       fputs("\n};\n\n", out);
+       fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+       fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+       fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+       fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+       if (logo_type == LINUX_LOGO_CLUT224) {
+               fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
+               fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
+       }
+       fprintf(out, "\t.data\t\t= %s_data\n", logoname);
+       fputs("};\n\n", out);
+
+       /* close logo file */
+       if (outputname)
+               fclose(out);
 }
 
 static int write_hex_cnt;
 
 static void write_hex(unsigned char byte)
 {
-    if (write_hex_cnt % 12)
-       fprintf(out, ", 0x%02x", byte);
-    else if (write_hex_cnt)
-       fprintf(out, ",\n\t0x%02x", byte);
-    else
-       fprintf(out, "\t0x%02x", byte);
-    write_hex_cnt++;
+       if (write_hex_cnt % 12)
+               fprintf(out, ", 0x%02x", byte);
+       else if (write_hex_cnt)
+               fprintf(out, ",\n\t0x%02x", byte);
+       else
+               fprintf(out, "\t0x%02x", byte);
+       write_hex_cnt++;
 }
 
 static void write_logo_mono(void)
 {
-    unsigned int i, j;
-    unsigned char val, bit;
-
-    /* validate image */
-    for (i = 0; i < logo_height; i++)
-       for (j = 0; j < logo_width; j++)
-           if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
-               die("Image must be monochrome\n");
-
-    /* write file header */
-    write_header();
-
-    /* write logo data */
-    for (i = 0; i < logo_height; i++) {
-       for (j = 0; j < logo_width;) {
-           for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
-               if (logo_data[i][j].red)
-                   val |= bit;
-           write_hex(val);
+       unsigned int i, j;
+       unsigned char val, bit;
+
+       /* validate image */
+       for (i = 0; i < logo_height; i++)
+               for (j = 0; j < logo_width; j++)
+                       if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j]))
+                               die("Image must be monochrome\n");
+
+       /* write file header */
+       write_header();
+
+       /* write logo data */
+       for (i = 0; i < logo_height; i++) {
+               for (j = 0; j < logo_width;) {
+                       for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1)
+                               if (logo_data[i][j].red)
+                                       val |= bit;
+                       write_hex(val);
+               }
        }
-    }
 
-    /* write logo structure and file footer */
-    write_footer();
+       /* write logo structure and file footer */
+       write_footer();
 }
 
 static void write_logo_vga16(void)
 {
-    unsigned int i, j, k;
-    unsigned char val;
-
-    /* validate image */
-    for (i = 0; i < logo_height; i++)
-       for (j = 0; j < logo_width; j++) {
-           for (k = 0; k < 16; k++)
-               if (is_equal(logo_data[i][j], clut_vga16[k]))
-                   break;
-           if (k == 16)
-               die("Image must use the 16 console colors only\n"
-                   "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
-                   "of colors\n");
-       }
+       unsigned int i, j, k;
+       unsigned char val;
 
-    /* write file header */
-    write_header();
-
-    /* write logo data */
-    for (i = 0; i < logo_height; i++)
-       for (j = 0; j < logo_width; j++) {
-           for (k = 0; k < 16; k++)
-               if (is_equal(logo_data[i][j], clut_vga16[k]))
-                   break;
-           val = k<<4;
-           if (++j < logo_width) {
-               for (k = 0; k < 16; k++)
-                   if (is_equal(logo_data[i][j], clut_vga16[k]))
-                       break;
-               val |= k;
-           }
-           write_hex(val);
-       }
+       /* validate image */
+       for (i = 0; i < logo_height; i++)
+               for (j = 0; j < logo_width; j++) {
+                       for (k = 0; k < 16; k++)
+                               if (is_equal(logo_data[i][j], clut_vga16[k]))
+                                       break;
+                       if (k == 16)
+                               die("Image must use the 16 console colors only\n"
+                                   "Use ppmquant(1) -map clut_vga16.ppm to reduce the number "
+                                   "of colors\n");
+               }
 
-    /* write logo structure and file footer */
-    write_footer();
+       /* write file header */
+       write_header();
+
+       /* write logo data */
+       for (i = 0; i < logo_height; i++)
+               for (j = 0; j < logo_width; j++) {
+                       for (k = 0; k < 16; k++)
+                               if (is_equal(logo_data[i][j], clut_vga16[k]))
+                                       break;
+                       val = k<<4;
+                       if (++j < logo_width) {
+                               for (k = 0; k < 16; k++)
+                                       if (is_equal(logo_data[i][j], clut_vga16[k]))
+                                               break;
+                               val |= k;
+                       }
+                       write_hex(val);
+               }
+
+       /* write logo structure and file footer */
+       write_footer();
 }
 
 static void write_logo_clut224(void)
 {
-    unsigned int i, j, k;
-
-    /* validate image */
-    for (i = 0; i < logo_height; i++)
-       for (j = 0; j < logo_width; j++) {
-           for (k = 0; k < logo_clutsize; k++)
-               if (is_equal(logo_data[i][j], logo_clut[k]))
-                   break;
-           if (k == logo_clutsize) {
-               if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
-                   die("Image has more than %d colors\n"
-                       "Use ppmquant(1) to reduce the number of colors\n",
-                       MAX_LINUX_LOGO_COLORS);
-               logo_clut[logo_clutsize++] = logo_data[i][j];
-           }
-       }
+       unsigned int i, j, k;
 
-    /* write file header */
-    write_header();
+       /* validate image */
+       for (i = 0; i < logo_height; i++)
+               for (j = 0; j < logo_width; j++) {
+                       for (k = 0; k < logo_clutsize; k++)
+                               if (is_equal(logo_data[i][j], logo_clut[k]))
+                                       break;
+                       if (k == logo_clutsize) {
+                               if (logo_clutsize == MAX_LINUX_LOGO_COLORS)
+                                       die("Image has more than %d colors\n"
+                                           "Use ppmquant(1) to reduce the number of colors\n",
+                                           MAX_LINUX_LOGO_COLORS);
+                               logo_clut[logo_clutsize++] = logo_data[i][j];
+                       }
+               }
 
-    /* write logo data */
-    for (i = 0; i < logo_height; i++)
-       for (j = 0; j < logo_width; j++) {
-           for (k = 0; k < logo_clutsize; k++)
-               if (is_equal(logo_data[i][j], logo_clut[k]))
-                   break;
-           write_hex(k+32);
+       /* write file header */
+       write_header();
+
+       /* write logo data */
+       for (i = 0; i < logo_height; i++)
+               for (j = 0; j < logo_width; j++) {
+                       for (k = 0; k < logo_clutsize; k++)
+                               if (is_equal(logo_data[i][j], logo_clut[k]))
+                                       break;
+                       write_hex(k+32);
+               }
+       fputs("\n};\n\n", out);
+
+       /* write logo clut */
+       fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+               logoname);
+       write_hex_cnt = 0;
+       for (i = 0; i < logo_clutsize; i++) {
+               write_hex(logo_clut[i].red);
+               write_hex(logo_clut[i].green);
+               write_hex(logo_clut[i].blue);
        }
-    fputs("\n};\n\n", out);
-
-    /* write logo clut */
-    fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
-           logoname);
-    write_hex_cnt = 0;
-    for (i = 0; i < logo_clutsize; i++) {
-       write_hex(logo_clut[i].red);
-       write_hex(logo_clut[i].green);
-       write_hex(logo_clut[i].blue);
-    }
-
-    /* write logo structure and file footer */
-    write_footer();
+
+       /* write logo structure and file footer */
+       write_footer();
 }
 
 static void write_logo_gray256(void)
 {
-    unsigned int i, j;
+       unsigned int i, j;
 
-    /* validate image */
-    for (i = 0; i < logo_height; i++)
-       for (j = 0; j < logo_width; j++)
-           if (!is_gray(logo_data[i][j]))
-               die("Image must be grayscale\n");
+       /* validate image */
+       for (i = 0; i < logo_height; i++)
+               for (j = 0; j < logo_width; j++)
+                       if (!is_gray(logo_data[i][j]))
+                               die("Image must be grayscale\n");
 
-    /* write file header */
-    write_header();
+       /* write file header */
+       write_header();
 
-    /* write logo data */
-    for (i = 0; i < logo_height; i++)
-       for (j = 0; j < logo_width; j++)
-           write_hex(logo_data[i][j].red);
+       /* write logo data */
+       for (i = 0; i < logo_height; i++)
+               for (j = 0; j < logo_width; j++)
+                       write_hex(logo_data[i][j].red);
 
-    /* write logo structure and file footer */
-    write_footer();
+       /* write logo structure and file footer */
+       write_footer();
 }
 
 static void die(const char *fmt, ...)
 {
-    va_list ap;
+       va_list ap;
 
-    va_start(ap, fmt);
-    vfprintf(stderr, fmt, ap);
-    va_end(ap);
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       va_end(ap);
 
-    exit(1);
+       exit(1);
 }
 
 static void usage(void)
 {
-    die("\n"
+       die("\n"
        "Usage: %s [options] <filename>\n"
        "\n"
        "Valid options:\n"
-       "    -h          : display this usage information\n"
-       "    -n <name>   : specify logo name (default: linux_logo)\n"
-       "    -o <output> : output to file <output> instead of stdout\n"
-       "    -t <type>   : specify logo type, one of\n"
-       "                      mono    : monochrome black/white\n"
-       "                      vga16   : 16 colors VGA text palette\n"
-       "                      clut224 : 224 colors (default)\n"
-       "                      gray256 : 256 levels grayscale\n"
+       "    -h          : display this usage information\n"
+       "    -n <name>   : specify logo name (default: linux_logo)\n"
+       "    -o <output> : output to file <output> instead of stdout\n"
+       "    -t <type>   : specify logo type, one of\n"
+       "                      mono    : monochrome black/white\n"
+       "                      vga16   : 16 colors VGA text palette\n"
+       "                      clut224 : 224 colors (default)\n"
+       "                      gray256 : 256 levels grayscale\n"
        "\n", programname);
 }
 
 int main(int argc, char *argv[])
 {
-    int opt;
+       int opt;
 
-    programname = argv[0];
+       programname = argv[0];
 
-    opterr = 0;
-    while (1) {
-       opt = getopt(argc, argv, "hn:o:t:");
-       if (opt == -1)
-           break;
+       opterr = 0;
+       while (1) {
+               opt = getopt(argc, argv, "hn:o:t:");
+               if (opt == -1)
+                       break;
 
-       switch (opt) {
-           case 'h':
-               usage();
-               break;
+               switch (opt) {
+               case 'h':
+                       usage();
+                       break;
 
-           case 'n':
-               logoname = optarg;
-               break;
+               case 'n':
+                       logoname = optarg;
+                       break;
 
-           case 'o':
-               outputname = optarg;
-               break;
+               case 'o':
+                       outputname = optarg;
+                       break;
 
-           case 't':
-               if (!strcmp(optarg, "mono"))
-                   logo_type = LINUX_LOGO_MONO;
-               else if (!strcmp(optarg, "vga16"))
-                   logo_type = LINUX_LOGO_VGA16;
-               else if (!strcmp(optarg, "clut224"))
-                   logo_type = LINUX_LOGO_CLUT224;
-               else if (!strcmp(optarg, "gray256"))
-                   logo_type = LINUX_LOGO_GRAY256;
-               else
-                   usage();
-               break;
+               case 't':
+                       if (!strcmp(optarg, "mono"))
+                               logo_type = LINUX_LOGO_MONO;
+                       else if (!strcmp(optarg, "vga16"))
+                               logo_type = LINUX_LOGO_VGA16;
+                       else if (!strcmp(optarg, "clut224"))
+                               logo_type = LINUX_LOGO_CLUT224;
+                       else if (!strcmp(optarg, "gray256"))
+                               logo_type = LINUX_LOGO_GRAY256;
+                       else
+                               usage();
+                       break;
 
-           default:
-               usage();
-               break;
+               default:
+                       usage();
+                       break;
+               }
        }
-    }
-    if (optind != argc-1)
-       usage();
+       if (optind != argc-1)
+               usage();
 
-    filename = argv[optind];
+       filename = argv[optind];
 
-    read_image();
-    switch (logo_type) {
+       read_image();
+       switch (logo_type) {
        case LINUX_LOGO_MONO:
-           write_logo_mono();
-           break;
+               write_logo_mono();
+               break;
 
        case LINUX_LOGO_VGA16:
-           write_logo_vga16();
-           break;
+               write_logo_vga16();
+               break;
 
        case LINUX_LOGO_CLUT224:
-           write_logo_clut224();
-           break;
+               write_logo_clut224();
+               break;
 
        case LINUX_LOGO_GRAY256:
-           write_logo_gray256();
-           break;
-    }
-    exit(0);
+               write_logo_gray256();
+               break;
+       }
+       exit(0);
 }
index 7b4e9009f33559fe04a11c17af5433715b625893..46f1a8d558b0b28321b1f9312dd415df25f4f504 100644 (file)
@@ -31,6 +31,9 @@
 #define AAD_LEN                48
 #define MSG_HDR_VER    1
 
+#define SNP_REQ_MAX_RETRY_DURATION     (60*HZ)
+#define SNP_REQ_RETRY_DELAY            (2*HZ)
+
 struct snp_guest_crypto {
        struct crypto_aead *tfm;
        u8 *iv, *authtag;
@@ -318,26 +321,14 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
        return __enc_payload(snp_dev, req, payload, sz);
 }
 
-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
-                               u8 type, void *req_buf, size_t req_sz, void *resp_buf,
-                               u32 resp_sz, __u64 *fw_err)
+static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
 {
-       unsigned long err;
-       u64 seqno;
+       unsigned long err = 0xff, override_err = 0;
+       unsigned long req_start = jiffies;
+       unsigned int override_npages = 0;
        int rc;
 
-       /* Get message sequence and verify that its a non-zero */
-       seqno = snp_get_msg_seqno(snp_dev);
-       if (!seqno)
-               return -EIO;
-
-       memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
-
-       /* Encrypt the userspace provided payload */
-       rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
-       if (rc)
-               return rc;
-
+retry_request:
        /*
         * Call firmware to process the request. In this function the encrypted
         * message enters shared memory with the host. So after this call the
@@ -345,18 +336,24 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
         * prevent reuse of the IV.
         */
        rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+       switch (rc) {
+       case -ENOSPC:
+               /*
+                * If the extended guest request fails due to having too
+                * small of a certificate data buffer, retry the same
+                * guest request without the extended data request in
+                * order to increment the sequence number and thus avoid
+                * IV reuse.
+                */
+               override_npages = snp_dev->input.data_npages;
+               exit_code       = SVM_VMGEXIT_GUEST_REQUEST;
 
-       /*
-        * If the extended guest request fails due to having too small of a
-        * certificate data buffer, retry the same guest request without the
-        * extended data request in order to increment the sequence number
-        * and thus avoid IV reuse.
-        */
-       if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
-           err == SNP_GUEST_REQ_INVALID_LEN) {
-               const unsigned int certs_npages = snp_dev->input.data_npages;
-
-               exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+               /*
+                * Override the error to inform callers the given extended
+                * request buffer size was too small and give the caller the
+                * required buffer size.
+                */
+               override_err    = SNP_GUEST_REQ_INVALID_LEN;
 
                /*
                 * If this call to the firmware succeeds, the sequence number can
@@ -366,15 +363,20 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
                 * of the VMPCK and the error code being propagated back to the
                 * user as an ioctl() return code.
                 */
-               rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+               goto retry_request;
 
-               /*
-                * Override the error to inform callers the given extended
-                * request buffer size was too small and give the caller the
-                * required buffer size.
-                */
-               err = SNP_GUEST_REQ_INVALID_LEN;
-               snp_dev->input.data_npages = certs_npages;
+       /*
+        * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
+        * throttled. Retry in the driver to avoid returning and reusing the
+        * message sequence number on a different message.
+        */
+       case -EAGAIN:
+               if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
+                       rc = -ETIMEDOUT;
+                       break;
+               }
+               schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
+               goto retry_request;
        }
 
        /*
@@ -386,7 +388,10 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
        snp_inc_msg_seqno(snp_dev);
 
        if (fw_err)
-               *fw_err = err;
+               *fw_err = override_err ?: err;
+
+       if (override_npages)
+               snp_dev->input.data_npages = override_npages;
 
        /*
         * If an extended guest request was issued and the supplied certificate
@@ -394,29 +399,49 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
         * prevent IV reuse. If the standard request was successful, return -EIO
         * back to the caller as would have originally been returned.
         */
-       if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
+       if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+               return -EIO;
+
+       return rc;
+}
+
+static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+                               u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+                               u32 resp_sz, __u64 *fw_err)
+{
+       u64 seqno;
+       int rc;
+
+       /* Get message sequence and verify that its a non-zero */
+       seqno = snp_get_msg_seqno(snp_dev);
+       if (!seqno)
                return -EIO;
 
+       memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+
+       /* Encrypt the userspace provided payload */
+       rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+       if (rc)
+               return rc;
+
+       rc = __handle_guest_request(snp_dev, exit_code, fw_err);
        if (rc) {
-               dev_alert(snp_dev->dev,
-                         "Detected error from ASP request. rc: %d, fw_err: %llu\n",
-                         rc, *fw_err);
-               goto disable_vmpck;
+               if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
+                       return rc;
+
+               dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
+               snp_disable_vmpck(snp_dev);
+               return rc;
        }
 
        rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
        if (rc) {
-               dev_alert(snp_dev->dev,
-                         "Detected unexpected decode failure from ASP. rc: %d\n",
-                         rc);
-               goto disable_vmpck;
+               dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
+               snp_disable_vmpck(snp_dev);
+               return rc;
        }
 
        return 0;
-
-disable_vmpck:
-       snp_disable_vmpck(snp_dev);
-       return rc;
 }
 
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
@@ -703,6 +728,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
        void __iomem *mapping;
        int ret;
 
+       if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+               return -ENODEV;
+
        if (!dev->platform_data)
                return -ENODEV;
 
index c6c73a33c44d556c0ac5ebf53d6b5e078bfb6dc5..b799bc759c15f4e988a858ed1f41c1d3b3b6d934 100644 (file)
@@ -64,7 +64,7 @@ static int xensyms_next_sym(struct xensyms *xs)
 
 static void *xensyms_start(struct seq_file *m, loff_t *pos)
 {
-       struct xensyms *xs = (struct xensyms *)m->private;
+       struct xensyms *xs = m->private;
 
        xs->op.u.symdata.symnum = *pos;
 
@@ -76,7 +76,7 @@ static void *xensyms_start(struct seq_file *m, loff_t *pos)
 
 static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
 {
-       struct xensyms *xs = (struct xensyms *)m->private;
+       struct xensyms *xs = m->private;
 
        xs->op.u.symdata.symnum = ++(*pos);
 
@@ -88,7 +88,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
 
 static int xensyms_show(struct seq_file *m, void *p)
 {
-       struct xensyms *xs = (struct xensyms *)m->private;
+       struct xensyms *xs = m->private;
        struct xenpf_symdata *symdata = &xs->op.u.symdata;
 
        seq_printf(m, "%016llx %c %s\n", symdata->address,
@@ -120,7 +120,7 @@ static int xensyms_open(struct inode *inode, struct file *file)
                return ret;
 
        m = file->private_data;
-       xs = (struct xensyms *)m->private;
+       xs = m->private;
 
        xs->namelen = XEN_KSYM_NAME_LEN + 1;
        xs->name = kzalloc(xs->namelen, GFP_KERNEL);
@@ -138,7 +138,7 @@ static int xensyms_open(struct inode *inode, struct file *file)
 static int xensyms_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = file->private_data;
-       struct xensyms *xs = (struct xensyms *)m->private;
+       struct xensyms *xs = m->private;
 
        kfree(xs->name);
        return seq_release_private(inode, file);
index 0ef8b8926bfa7209a6e82145ee1c2b5f81b407c7..5fc670c27f8648d996016fe8da3f6fae658fdc78 100644 (file)
@@ -1175,14 +1175,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                        < block_group->zone_unusable);
                WARN_ON(block_group->space_info->disk_total
                        < block_group->length * factor);
-               WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
-                                &block_group->runtime_flags) &&
-                       block_group->space_info->active_total_bytes
-                       < block_group->length);
        }
        block_group->space_info->total_bytes -= block_group->length;
-       if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
-               block_group->space_info->active_total_bytes -= block_group->length;
        block_group->space_info->bytes_readonly -=
                (block_group->length - block_group->zone_unusable);
        block_group->space_info->bytes_zone_unusable -=
@@ -3476,6 +3470,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
        spin_unlock(&info->delalloc_root_lock);
 
        while (total) {
+               struct btrfs_space_info *space_info;
                bool reclaim = false;
 
                cache = btrfs_lookup_block_group(info, bytenr);
@@ -3483,6 +3478,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
                        ret = -ENOENT;
                        break;
                }
+               space_info = cache->space_info;
                factor = btrfs_bg_type_to_factor(cache->flags);
 
                /*
@@ -3497,7 +3493,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
                byte_in_group = bytenr - cache->start;
                WARN_ON(byte_in_group > cache->length);
 
-               spin_lock(&cache->space_info->lock);
+               spin_lock(&space_info->lock);
                spin_lock(&cache->lock);
 
                if (btrfs_test_opt(info, SPACE_CACHE) &&
@@ -3510,24 +3506,24 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
                        old_val += num_bytes;
                        cache->used = old_val;
                        cache->reserved -= num_bytes;
-                       cache->space_info->bytes_reserved -= num_bytes;
-                       cache->space_info->bytes_used += num_bytes;
-                       cache->space_info->disk_used += num_bytes * factor;
+                       space_info->bytes_reserved -= num_bytes;
+                       space_info->bytes_used += num_bytes;
+                       space_info->disk_used += num_bytes * factor;
                        spin_unlock(&cache->lock);
-                       spin_unlock(&cache->space_info->lock);
+                       spin_unlock(&space_info->lock);
                } else {
                        old_val -= num_bytes;
                        cache->used = old_val;
                        cache->pinned += num_bytes;
-                       btrfs_space_info_update_bytes_pinned(info,
-                                       cache->space_info, num_bytes);
-                       cache->space_info->bytes_used -= num_bytes;
-                       cache->space_info->disk_used -= num_bytes * factor;
+                       btrfs_space_info_update_bytes_pinned(info, space_info,
+                                                            num_bytes);
+                       space_info->bytes_used -= num_bytes;
+                       space_info->disk_used -= num_bytes * factor;
 
                        reclaim = should_reclaim_block_group(cache, num_bytes);
 
                        spin_unlock(&cache->lock);
-                       spin_unlock(&cache->space_info->lock);
+                       spin_unlock(&space_info->lock);
 
                        set_extent_dirty(&trans->transaction->pinned_extents,
                                         bytenr, bytenr + num_bytes - 1,
index 0d250d052487cf04085e5b059e10be1810e7d654..d84cef89cdff522ab64931da108498d00637ea53 100644 (file)
@@ -2693,8 +2693,13 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
                bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
 
        spin_lock(&ctl->tree_lock);
+       /* Count initial region as zone_unusable until it gets activated. */
        if (!used)
                to_free = size;
+       else if (initial &&
+                test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
+                (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+               to_free = 0;
        else if (initial)
                to_free = block_group->zone_capacity;
        else if (offset >= block_group->alloc_offset)
@@ -2722,7 +2727,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
        reclaimable_unusable = block_group->zone_unusable -
                               (block_group->length - block_group->zone_capacity);
        /* All the region is now unusable. Mark it as unused and reclaim */
-       if (block_group->zone_unusable == block_group->length) {
+       if (block_group->zone_unusable == block_group->length &&
+           block_group->alloc_offset) {
                btrfs_mark_bg_unused(block_group);
        } else if (bg_reclaim_threshold &&
                   reclaimable_unusable >=
index 4c477eae689148dd59c45514eb2069a073d08155..24cd492294086c1dfa02b59e61d5f4d311e99c30 100644 (file)
@@ -120,11 +120,8 @@ enum {
        /* Indicate that we want to commit the transaction. */
        BTRFS_FS_NEED_TRANS_COMMIT,
 
-       /*
-        * Indicate metadata over-commit is disabled. This is set when active
-        * zone tracking is needed.
-        */
-       BTRFS_FS_NO_OVERCOMMIT,
+       /* This is set when active zone tracking is needed. */
+       BTRFS_FS_ACTIVE_ZONE_TRACKING,
 
        /*
         * Indicate if we have some features changed, this is mostly for
index 6c18dc9a1831d03b7ec04c13ac6efc19a28d7a67..957e4d76a7b6578d59fd8f0b7d887e30d02c4e2b 100644 (file)
@@ -5421,8 +5421,13 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
                return -ENOMEM;
 
        ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
-       if (ret)
+       if (ret < 0)
                goto out;
+       /*
+        * fscrypt_setup_filename() should never return a positive value, but
+        * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
+        */
+       ASSERT(ret == 0);
 
        /* This needs to handle no-key deletions later on */
 
index 69c09508afb506ac8121caeae39fb2e7071362c0..3eecce86f63fc4e8d1cc96c6c7b09d84db076d1e 100644 (file)
@@ -308,8 +308,6 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
        ASSERT(found);
        spin_lock(&found->lock);
        found->total_bytes += block_group->length;
-       if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
-               found->active_total_bytes += block_group->length;
        found->disk_total += block_group->length * factor;
        found->bytes_used += block_group->used;
        found->disk_used += block_group->used * factor;
@@ -379,22 +377,6 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
        return avail;
 }
 
-static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
-                                      struct btrfs_space_info *space_info)
-{
-       /*
-        * On regular filesystem, all total_bytes are always writable. On zoned
-        * filesystem, there may be a limitation imposed by max_active_zones.
-        * For metadata allocation, we cannot finish an existing active block
-        * group to avoid a deadlock. Thus, we need to consider only the active
-        * groups to be writable for metadata space.
-        */
-       if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
-               return space_info->total_bytes;
-
-       return space_info->active_total_bytes;
-}
-
 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                         struct btrfs_space_info *space_info, u64 bytes,
                         enum btrfs_reserve_flush_enum flush)
@@ -407,13 +389,13 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                return 0;
 
        used = btrfs_space_info_used(space_info, true);
-       if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
+       if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
            (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
                avail = 0;
        else
                avail = calc_available_free_space(fs_info, space_info, flush);
 
-       if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
+       if (used + bytes < space_info->total_bytes + avail)
                return 1;
        return 0;
 }
@@ -449,7 +431,7 @@ again:
                ticket = list_first_entry(head, struct reserve_ticket, list);
 
                /* Check and see if our ticket can be satisfied now. */
-               if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
+               if ((used + ticket->bytes <= space_info->total_bytes) ||
                    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
                                         flush)) {
                        btrfs_space_info_update_bytes_may_use(fs_info,
@@ -829,7 +811,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
 {
        u64 used;
        u64 avail;
-       u64 total;
        u64 to_reclaim = space_info->reclaim_size;
 
        lockdep_assert_held(&space_info->lock);
@@ -844,9 +825,8 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
         * space.  If that's the case add in our overage so we make sure to put
         * appropriate pressure on the flushing state machine.
         */
-       total = writable_total_bytes(fs_info, space_info);
-       if (total + avail < used)
-               to_reclaim += used - (total + avail);
+       if (space_info->total_bytes + avail < used)
+               to_reclaim += used - (space_info->total_bytes + avail);
 
        return to_reclaim;
 }
@@ -856,11 +836,10 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
 {
        u64 global_rsv_size = fs_info->global_block_rsv.reserved;
        u64 ordered, delalloc;
-       u64 total = writable_total_bytes(fs_info, space_info);
        u64 thresh;
        u64 used;
 
-       thresh = mult_perc(total, 90);
+       thresh = mult_perc(space_info->total_bytes, 90);
 
        lockdep_assert_held(&space_info->lock);
 
@@ -923,8 +902,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
                                           BTRFS_RESERVE_FLUSH_ALL);
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_readonly + global_rsv_size;
-       if (used < total)
-               thresh += total - used;
+       if (used < space_info->total_bytes)
+               thresh += space_info->total_bytes - used;
        thresh >>= space_info->clamp;
 
        used = space_info->bytes_pinned;
@@ -1651,7 +1630,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
         * can_overcommit() to ensure we can overcommit to continue.
         */
        if (!pending_tickets &&
-           ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
+           ((used + orig_bytes <= space_info->total_bytes) ||
             btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
                btrfs_space_info_update_bytes_may_use(fs_info, space_info,
                                                      orig_bytes);
@@ -1665,8 +1644,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
         */
        if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
                used = btrfs_space_info_used(space_info, false);
-               if (used + orig_bytes <=
-                   writable_total_bytes(fs_info, space_info)) {
+               if (used + orig_bytes <= space_info->total_bytes) {
                        btrfs_space_info_update_bytes_may_use(fs_info, space_info,
                                                              orig_bytes);
                        ret = 0;
index fc99ea2b0c34fc2187813dffc2a206577e65ee22..2033b71b18cece2b5ee1e35877d184e1d3ced1b2 100644 (file)
@@ -96,8 +96,6 @@ struct btrfs_space_info {
        u64 bytes_may_use;      /* number of bytes that may be used for
                                   delalloc/allocations */
        u64 bytes_readonly;     /* total bytes that are read only */
-       /* Total bytes in the space, but only accounts active block groups. */
-       u64 active_total_bytes;
        u64 bytes_zone_unusable;        /* total bytes that are unusable until
                                           resetting the device zone */
 
index 7823168c08a6aa22466addc1472f0c4f3732529c..6d0124b6e79e3ddeb1ed32c19f06aec306c78eb9 100644 (file)
@@ -6363,7 +6363,8 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
        ASSERT(op != BTRFS_MAP_DISCARD);
 
        em = btrfs_get_chunk_map(fs_info, logical, *length);
-       ASSERT(!IS_ERR(em));
+       if (IS_ERR(em))
+               return PTR_ERR(em);
 
        map = em->map_lookup;
        data_stripes = nr_data_stripes(map);
index f95b2c94d6199a62194d785ffa3d427c0a3fa79b..45d04092f2f8cdadded5570b677738c9de74197a 100644 (file)
@@ -524,8 +524,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
                }
                atomic_set(&zone_info->active_zones_left,
                           max_active_zones - nactive);
-               /* Overcommit does not work well with active zone tacking. */
-               set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
+               set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
        }
 
        /* Validate superblock log */
@@ -1581,9 +1580,19 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
                return;
 
        WARN_ON(cache->bytes_super != 0);
-       unusable = (cache->alloc_offset - cache->used) +
-                  (cache->length - cache->zone_capacity);
-       free = cache->zone_capacity - cache->alloc_offset;
+
+       /* Check for block groups never get activated */
+       if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
+           cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
+           !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
+           cache->alloc_offset == 0) {
+               unusable = cache->length;
+               free = 0;
+       } else {
+               unusable = (cache->alloc_offset - cache->used) +
+                          (cache->length - cache->zone_capacity);
+               free = cache->zone_capacity - cache->alloc_offset;
+       }
 
        /* We only need ->free_space in ALLOC_SEQ block groups */
        cache->cached = BTRFS_CACHE_FINISHED;
@@ -1902,7 +1911,11 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
        /* Successfully activated all the zones */
        set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
-       space_info->active_total_bytes += block_group->length;
+       WARN_ON(block_group->alloc_offset != 0);
+       if (block_group->zone_unusable == block_group->length) {
+               block_group->zone_unusable = block_group->length - block_group->zone_capacity;
+               space_info->bytes_zone_unusable -= block_group->zone_capacity;
+       }
        spin_unlock(&block_group->lock);
        btrfs_try_granting_tickets(fs_info, space_info);
        spin_unlock(&space_info->lock);
@@ -2086,11 +2099,21 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
                if (!device->bdev)
                        continue;
 
-               if (!zinfo->max_active_zones ||
-                   atomic_read(&zinfo->active_zones_left)) {
+               if (!zinfo->max_active_zones) {
                        ret = true;
                        break;
                }
+
+               switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+               case 0: /* single */
+                       ret = (atomic_read(&zinfo->active_zones_left) >= 1);
+                       break;
+               case BTRFS_BLOCK_GROUP_DUP:
+                       ret = (atomic_read(&zinfo->active_zones_left) >= 2);
+                       break;
+               }
+               if (ret)
+                       break;
        }
        mutex_unlock(&fs_info->chunk_mutex);
 
@@ -2256,7 +2279,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
                u64 avail;
 
                spin_lock(&block_group->lock);
-               if (block_group->reserved ||
+               if (block_group->reserved || block_group->alloc_offset == 0 ||
                    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
                        spin_unlock(&block_group->lock);
                        continue;
@@ -2293,10 +2316,6 @@ int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
        if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
                return 0;
 
-       /* No more block groups to activate */
-       if (space_info->active_total_bytes == space_info->total_bytes)
-               return 0;
-
        for (;;) {
                int ret;
                bool need_finish = false;
index 75d5e06306ea52d30038ffa4f2d7760bbcf346e0..bfc964b36c72ecaa9da088892be7349731b327dc 100644 (file)
@@ -99,6 +99,23 @@ path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
        return dentry;
 }
 
+static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
+                                 const char *path)
+{
+       size_t len = 0;
+
+       if (!*path)
+               return path;
+
+       if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+           cifs_sb->prepath) {
+               len = strlen(cifs_sb->prepath) + 1;
+               if (unlikely(len > strlen(path)))
+                       return ERR_PTR(-EINVAL);
+       }
+       return path + len;
+}
+
 /*
  * Open the and cache a directory handle.
  * If error then *cfid is not initialized.
@@ -125,6 +142,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
        struct dentry *dentry = NULL;
        struct cached_fid *cfid;
        struct cached_fids *cfids;
+       const char *npath;
 
        if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
            is_smb1_server(tcon->ses->server))
@@ -160,6 +178,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
                return 0;
        }
 
+       /*
+        * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
+        * calling ->lookup() which already adds those through
+        * build_path_from_dentry().  Also, do it earlier as we might reconnect
+        * below when trying to send compounded request and then potentially
+        * having a different prefix path (e.g. after DFS failover).
+        */
+       npath = path_no_prefix(cifs_sb, path);
+       if (IS_ERR(npath)) {
+               rc = PTR_ERR(npath);
+               kfree(utf16_path);
+               return rc;
+       }
+
        /*
         * We do not hold the lock for the open because in case
         * SMB2_open needs to reconnect.
@@ -184,6 +216,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = path,
                .create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
                .desired_access = FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
@@ -251,10 +284,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
                                (char *)&cfid->file_all_info))
                cfid->file_all_info_is_valid = true;
 
-       if (!path[0])
+       if (!npath[0])
                dentry = dget(cifs_sb->root);
        else {
-               dentry = path_to_dentry(cifs_sb, path);
+               dentry = path_to_dentry(cifs_sb, npath);
                if (IS_ERR(dentry)) {
                        rc = -ENOENT;
                        goto oshr_free;
index 1911f7016fa1d5e747681f6df06e75a27a6db199..e9c8c088d948ccb44fa110f64ab99729a50d1e2f 100644 (file)
@@ -176,7 +176,7 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
 
        seq_puts(m, "# Version:1\n");
        seq_puts(m, "# Format:\n");
-       seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>");
+       seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
 #ifdef CONFIG_CIFS_DEBUG2
        seq_printf(m, " <filename> <mid>\n");
 #else
@@ -189,8 +189,9 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
                                spin_lock(&tcon->open_file_lock);
                                list_for_each_entry(cfile, &tcon->openFileList, tlist) {
                                        seq_printf(m,
-                                               "0x%x 0x%llx 0x%x %d %d %d %pd",
+                                               "0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd",
                                                tcon->tid,
+                                               ses->Suid,
                                                cfile->fid.persistent_fid,
                                                cfile->f_flags,
                                                cfile->count,
@@ -216,6 +217,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 {
        struct mid_q_entry *mid_entry;
        struct TCP_Server_Info *server;
+       struct TCP_Server_Info *chan_server;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifs_server_iface *iface;
@@ -420,6 +422,11 @@ skip_rdma:
                                   from_kuid(&init_user_ns, ses->linux_uid),
                                   from_kuid(&init_user_ns, ses->cred_uid));
 
+                       if (ses->dfs_root_ses) {
+                               seq_printf(m, "\n\tDFS root session id: 0x%llx",
+                                          ses->dfs_root_ses->Suid);
+                       }
+
                        spin_lock(&ses->chan_lock);
                        if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
                                seq_puts(m, "\tPrimary channel: DISCONNECTED ");
@@ -469,23 +476,35 @@ skip_rdma:
                                        seq_puts(m, "\t\t[CONNECTED]\n");
                        }
                        spin_unlock(&ses->iface_lock);
+
+                       seq_puts(m, "\n\n\tMIDs: ");
+                       spin_lock(&ses->chan_lock);
+                       for (j = 0; j < ses->chan_count; j++) {
+                               chan_server = ses->chans[j].server;
+                               if (!chan_server)
+                                       continue;
+
+                               if (list_empty(&chan_server->pending_mid_q))
+                                       continue;
+
+                               seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
+                                          chan_server->conn_id);
+                               spin_lock(&chan_server->mid_lock);
+                               list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
+                                       seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
+                                                  mid_entry->mid_state,
+                                                  le16_to_cpu(mid_entry->command),
+                                                  mid_entry->pid,
+                                                  mid_entry->callback_data,
+                                                  mid_entry->mid);
+                               }
+                               spin_unlock(&chan_server->mid_lock);
+                       }
+                       spin_unlock(&ses->chan_lock);
+                       seq_puts(m, "\n--\n");
                }
                if (i == 0)
                        seq_printf(m, "\n\t\t[NONE]");
-
-               seq_puts(m, "\n\n\tMIDs: ");
-               spin_lock(&server->mid_lock);
-               list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
-                       seq_printf(m, "\n\tState: %d com: %d pid:"
-                                       " %d cbdata: %p mid %llu\n",
-                                       mid_entry->mid_state,
-                                       le16_to_cpu(mid_entry->command),
-                                       mid_entry->pid,
-                                       mid_entry->callback_data,
-                                       mid_entry->mid);
-               }
-               spin_unlock(&server->mid_lock);
-               seq_printf(m, "\n--\n");
        }
        if (c == 0)
                seq_printf(m, "\n\t[NONE]");
index 2b1a8d55b4ec427dcd25ddad2439bd9a65f79c93..cb40074feb3e99bb5a86723fb17d00da744e5f41 100644 (file)
@@ -179,6 +179,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path)
        tmp.source = full_path;
        tmp.leaf_fullpath = NULL;
        tmp.UNC = tmp.prepath = NULL;
+       tmp.dfs_root_ses = NULL;
 
        rc = smb3_fs_context_dup(ctx, &tmp);
        if (rc) {
index 013a4bd65280ce9d7341796e456f189eff255367..65175919228014001a7b5c0b0acc4df8c72ed8b2 100644 (file)
@@ -61,8 +61,6 @@ struct cifs_sb_info {
        /* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
        char *prepath;
 
-       /* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
-       uuid_t dfs_mount_id;
        /*
         * Indicate whether serverino option was turned off later
         * (cifs_autodisable_serverino) in order to match new mounts.
index cbcf210d56e4863d705597175ee952100cb4c51e..ac9034fce409d27be6640a1fd1dbcea66c3abf32 100644 (file)
@@ -731,13 +731,16 @@ static void cifs_umount_begin(struct super_block *sb)
        spin_lock(&tcon->tc_lock);
        if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
                /* we have other mounts to same share or we have
-                  already tried to force umount this and woken up
+                  already tried to umount this and woken up
                   all waiting network requests, nothing to do */
                spin_unlock(&tcon->tc_lock);
                spin_unlock(&cifs_tcp_ses_lock);
                return;
-       } else if (tcon->tc_count == 1)
-               tcon->status = TID_EXITING;
+       }
+       /*
+        * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
+        * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
+        */
        spin_unlock(&tcon->tc_lock);
        spin_unlock(&cifs_tcp_ses_lock);
 
index a99883f16d9467d75dd167ada1d32b5a951a617d..08a73dcb778641bc4d313d34632bf06d9d3ed621 100644 (file)
@@ -1233,6 +1233,7 @@ struct cifs_tcon {
        /* BB add field for back pointer to sb struct(s)? */
 #ifdef CONFIG_CIFS_DFS_UPCALL
        struct list_head ulist; /* cache update list */
+       struct list_head dfs_ses_list;
 #endif
        struct delayed_work     query_interfaces; /* query interfaces workqueue job */
 };
@@ -1749,9 +1750,8 @@ struct cifs_mount_ctx {
        struct TCP_Server_Info *server;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
-       struct cifs_ses *root_ses;
-       uuid_t mount_id;
        char *origin_fullpath, *leaf_fullpath;
+       struct list_head dfs_ses_list;
 };
 
 static inline void free_dfs_info_param(struct dfs_info3_param *param)
index a43c78396dd8815a9b972814bb599dafd2c4c456..38a697eca3050d5a862a7ef917f84edec9d4ba3e 100644 (file)
@@ -86,13 +86,11 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 
        /*
         * only tree disconnect, open, and write, (and ulogoff which does not
-        * have tcon) are allowed as we start force umount
+        * have tcon) are allowed as we start umount
         */
        spin_lock(&tcon->tc_lock);
        if (tcon->status == TID_EXITING) {
-               if (smb_command != SMB_COM_WRITE_ANDX &&
-                   smb_command != SMB_COM_OPEN_ANDX &&
-                   smb_command != SMB_COM_TREE_DISCONNECT) {
+               if (smb_command != SMB_COM_TREE_DISCONNECT) {
                        spin_unlock(&tcon->tc_lock);
                        cifs_dbg(FYI, "can not send cmd %d while umounting\n",
                                 smb_command);
index 5233f14f0636ac0fa65ff1d1b058d186543ffada..1cbb9058799572686193dfa84925d0b03601d5ad 100644 (file)
@@ -212,31 +212,42 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
                        cifs_chan_update_iface(ses, server);
 
                spin_lock(&ses->chan_lock);
-               if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
-                       goto next_session;
+               if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
+                       spin_unlock(&ses->chan_lock);
+                       continue;
+               }
 
                if (mark_smb_session)
                        CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
                else
                        cifs_chan_set_need_reconnect(ses, server);
 
+               cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
+                        __func__, ses->chans_need_reconnect);
+
                /* If all channels need reconnect, then tcon needs reconnect */
-               if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses))
-                       goto next_session;
+               if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+                       spin_unlock(&ses->chan_lock);
+                       continue;
+               }
+               spin_unlock(&ses->chan_lock);
 
+               spin_lock(&ses->ses_lock);
                ses->ses_status = SES_NEED_RECON;
+               spin_unlock(&ses->ses_lock);
 
                list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
                        tcon->need_reconnect = true;
+                       spin_lock(&tcon->tc_lock);
                        tcon->status = TID_NEED_RECON;
+                       spin_unlock(&tcon->tc_lock);
                }
                if (ses->tcon_ipc) {
                        ses->tcon_ipc->need_reconnect = true;
+                       spin_lock(&ses->tcon_ipc->tc_lock);
                        ses->tcon_ipc->status = TID_NEED_RECON;
+                       spin_unlock(&ses->tcon_ipc->tc_lock);
                }
-
-next_session:
-               spin_unlock(&ses->chan_lock);
        }
        spin_unlock(&cifs_tcp_ses_lock);
 }
@@ -1721,7 +1732,7 @@ out_err:
        return ERR_PTR(rc);
 }
 
-/* this function must be called with ses_lock held */
+/* this function must be called with ses_lock and chan_lock held */
 static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 {
        if (ctx->sectype != Unspecified &&
@@ -1732,12 +1743,8 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
         * If an existing session is limited to less channels than
         * requested, it should not be reused
         */
-       spin_lock(&ses->chan_lock);
-       if (ses->chan_max < ctx->max_channels) {
-               spin_unlock(&ses->chan_lock);
+       if (ses->chan_max < ctx->max_channels)
                return 0;
-       }
-       spin_unlock(&ses->chan_lock);
 
        switch (ses->sectype) {
        case Kerberos:
@@ -1865,10 +1872,13 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
                        spin_unlock(&ses->ses_lock);
                        continue;
                }
+               spin_lock(&ses->chan_lock);
                if (!match_session(ses, ctx)) {
+                       spin_unlock(&ses->chan_lock);
                        spin_unlock(&ses->ses_lock);
                        continue;
                }
+               spin_unlock(&ses->chan_lock);
                spin_unlock(&ses->ses_lock);
 
                ++ses->ses_count;
@@ -2229,6 +2239,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
         * need to lock before changing something in the session.
         */
        spin_lock(&cifs_tcp_ses_lock);
+       ses->dfs_root_ses = ctx->dfs_root_ses;
        list_add(&ses->smb_ses_list, &server->smb_ses_list);
        spin_unlock(&cifs_tcp_ses_lock);
 
@@ -2313,6 +2324,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        WARN_ON(tcon->tc_count < 0);
 
        list_del_init(&tcon->tcon_list);
+       tcon->status = TID_EXITING;
        spin_unlock(&tcon->tc_lock);
        spin_unlock(&cifs_tcp_ses_lock);
 
@@ -2692,6 +2704,7 @@ cifs_match_super(struct super_block *sb, void *data)
 
        spin_lock(&tcp_srv->srv_lock);
        spin_lock(&ses->ses_lock);
+       spin_lock(&ses->chan_lock);
        spin_lock(&tcon->tc_lock);
        if (!match_server(tcp_srv, ctx, dfs_super_cmp) ||
            !match_session(ses, ctx) ||
@@ -2704,6 +2717,7 @@ cifs_match_super(struct super_block *sb, void *data)
        rc = compare_mount_options(sb, mnt_data);
 out:
        spin_unlock(&tcon->tc_lock);
+       spin_unlock(&ses->chan_lock);
        spin_unlock(&ses->ses_lock);
        spin_unlock(&tcp_srv->srv_lock);
 
@@ -3407,7 +3421,8 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
        bool isdfs;
        int rc;
 
-       uuid_gen(&mnt_ctx.mount_id);
+       INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
+
        rc = dfs_mount_share(&mnt_ctx, &isdfs);
        if (rc)
                goto error;
@@ -3427,7 +3442,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
        kfree(cifs_sb->prepath);
        cifs_sb->prepath = ctx->prepath;
        ctx->prepath = NULL;
-       uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
 
 out:
        cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
@@ -3439,7 +3453,7 @@ out:
        return rc;
 
 error:
-       dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
+       dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
        kfree(mnt_ctx.origin_fullpath);
        kfree(mnt_ctx.leaf_fullpath);
        cifs_mount_put_conns(&mnt_ctx);
@@ -3637,9 +3651,6 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
        spin_unlock(&cifs_sb->tlink_tree_lock);
 
        kfree(cifs_sb->prepath);
-#ifdef CONFIG_CIFS_DFS_UPCALL
-       dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
-#endif
        call_rcu(&cifs_sb->rcu, delayed_free);
 }
 
@@ -3654,11 +3665,19 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
 
        /* only send once per connect */
        spin_lock(&server->srv_lock);
-       if (!server->ops->need_neg(server) ||
+       if (server->tcpStatus != CifsGood &&
+           server->tcpStatus != CifsNew &&
            server->tcpStatus != CifsNeedNegotiate) {
+               spin_unlock(&server->srv_lock);
+               return -EHOSTDOWN;
+       }
+
+       if (!server->ops->need_neg(server) &&
+           server->tcpStatus == CifsGood) {
                spin_unlock(&server->srv_lock);
                return 0;
        }
+
        server->tcpStatus = CifsInNegotiate;
        spin_unlock(&server->srv_lock);
 
@@ -3692,23 +3711,28 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
        bool is_binding = false;
 
        spin_lock(&ses->ses_lock);
+       cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
+                __func__, ses->chans_need_reconnect);
+
        if (ses->ses_status != SES_GOOD &&
            ses->ses_status != SES_NEW &&
            ses->ses_status != SES_NEED_RECON) {
                spin_unlock(&ses->ses_lock);
-               return 0;
+               return -EHOSTDOWN;
        }
 
        /* only send once per connect */
        spin_lock(&ses->chan_lock);
-       if (CIFS_ALL_CHANS_GOOD(ses) ||
-           cifs_chan_in_reconnect(ses, server)) {
+       if (CIFS_ALL_CHANS_GOOD(ses)) {
+               if (ses->ses_status == SES_NEED_RECON)
+                       ses->ses_status = SES_GOOD;
                spin_unlock(&ses->chan_lock);
                spin_unlock(&ses->ses_lock);
                return 0;
        }
-       is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+
        cifs_chan_set_in_reconnect(ses, server);
+       is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
        spin_unlock(&ses->chan_lock);
 
        if (!is_binding)
@@ -4038,9 +4062,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
 
        /* only send once per connect */
        spin_lock(&tcon->tc_lock);
-       if (tcon->ses->ses_status != SES_GOOD ||
-           (tcon->status != TID_NEW &&
-           tcon->status != TID_NEED_TCON)) {
+       if (tcon->status != TID_NEW &&
+           tcon->status != TID_NEED_TCON) {
+               spin_unlock(&tcon->tc_lock);
+               return -EHOSTDOWN;
+       }
+
+       if (tcon->status == TID_GOOD) {
                spin_unlock(&tcon->tc_lock);
                return 0;
        }
index b64d20374b9c853d4402ef3f8617dbfa57921d94..3a11716b6e13eb4b724177265a343981b26143a4 100644 (file)
@@ -95,25 +95,31 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
        ctx->leaf_fullpath = (char *)full_path;
        rc = cifs_mount_get_session(mnt_ctx);
        ctx->leaf_fullpath = NULL;
-       if (!rc) {
-               struct cifs_ses *ses = mnt_ctx->ses;
 
-               mutex_lock(&ses->session_mutex);
-               ses->dfs_root_ses = mnt_ctx->root_ses;
-               mutex_unlock(&ses->session_mutex);
-       }
        return rc;
 }
 
-static void set_root_ses(struct cifs_mount_ctx *mnt_ctx)
+static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
 {
-       if (mnt_ctx->ses) {
+       struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+       struct dfs_root_ses *root_ses;
+       struct cifs_ses *ses = mnt_ctx->ses;
+
+       if (ses) {
+               root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
+               if (!root_ses)
+                       return -ENOMEM;
+
+               INIT_LIST_HEAD(&root_ses->list);
+
                spin_lock(&cifs_tcp_ses_lock);
-               mnt_ctx->ses->ses_count++;
+               ses->ses_count++;
                spin_unlock(&cifs_tcp_ses_lock);
-               dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
+               root_ses->ses = ses;
+               list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
        }
-       mnt_ctx->root_ses = mnt_ctx->ses;
+       ctx->dfs_root_ses = ses;
+       return 0;
 }
 
 static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path,
@@ -121,7 +127,8 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
 {
        struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
        struct dfs_info3_param ref = {};
-       int rc;
+       bool is_refsrv = false;
+       int rc, rc2;
 
        rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);
        if (rc)
@@ -136,8 +143,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
        if (rc)
                goto out;
 
-       if (ref.flags & DFSREF_REFERRAL_SERVER)
-               set_root_ses(mnt_ctx);
+       is_refsrv = !!(ref.flags & DFSREF_REFERRAL_SERVER);
 
        rc = -EREMOTE;
        if (ref.flags & DFSREF_STORAGE_SERVER) {
@@ -146,13 +152,17 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
                        goto out;
 
                /* some servers may not advertise referral capability under ref.flags */
-               if (!(ref.flags & DFSREF_REFERRAL_SERVER) &&
-                   is_tcon_dfs(mnt_ctx->tcon))
-                       set_root_ses(mnt_ctx);
+               is_refsrv |= is_tcon_dfs(mnt_ctx->tcon);
 
                rc = cifs_is_path_remote(mnt_ctx);
        }
 
+       if (rc == -EREMOTE && is_refsrv) {
+               rc2 = get_root_smb_session(mnt_ctx);
+               if (rc2)
+                       rc = rc2;
+       }
+
 out:
        free_dfs_info_param(&ref);
        return rc;
@@ -165,6 +175,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
        char *ref_path = NULL, *full_path = NULL;
        struct dfs_cache_tgt_iterator *tit;
        struct TCP_Server_Info *server;
+       struct cifs_tcon *tcon;
        char *origin_fullpath = NULL;
        int num_links = 0;
        int rc;
@@ -234,12 +245,22 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
 
        if (!rc) {
                server = mnt_ctx->server;
+               tcon = mnt_ctx->tcon;
 
                mutex_lock(&server->refpath_lock);
-               server->origin_fullpath = origin_fullpath;
-               server->current_fullpath = server->leaf_fullpath;
+               if (!server->origin_fullpath) {
+                       server->origin_fullpath = origin_fullpath;
+                       server->current_fullpath = server->leaf_fullpath;
+                       origin_fullpath = NULL;
+               }
                mutex_unlock(&server->refpath_lock);
-               origin_fullpath = NULL;
+
+               if (list_empty(&tcon->dfs_ses_list)) {
+                       list_replace_init(&mnt_ctx->dfs_ses_list,
+                                         &tcon->dfs_ses_list);
+               } else {
+                       dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
+               }
        }
 
 out:
@@ -260,7 +281,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
        rc = get_session(mnt_ctx, NULL);
        if (rc)
                return rc;
-       mnt_ctx->root_ses = mnt_ctx->ses;
+       ctx->dfs_root_ses = mnt_ctx->ses;
        /*
         * If called with 'nodfs' mount option, then skip DFS resolving.  Otherwise unconditionally
         * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
@@ -280,7 +301,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
        }
 
        *isdfs = true;
-       set_root_ses(mnt_ctx);
+       rc = get_root_smb_session(mnt_ctx);
+       if (rc)
+               return rc;
 
        return __dfs_mount_share(mnt_ctx);
 }
@@ -479,9 +502,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
 
        /* only send once per connect */
        spin_lock(&tcon->tc_lock);
-       if (tcon->ses->ses_status != SES_GOOD ||
-           (tcon->status != TID_NEW &&
-           tcon->status != TID_NEED_TCON)) {
+       if (tcon->status != TID_NEW &&
+           tcon->status != TID_NEED_TCON) {
+               spin_unlock(&tcon->tc_lock);
+               return -EHOSTDOWN;
+       }
+
+       if (tcon->status == TID_GOOD) {
                spin_unlock(&tcon->tc_lock);
                return 0;
        }
index 344bea6d8bab1e471444131b1841c4442e5ca657..13f26e01f7b97c33f1bb1398b9853e2fa86e2707 100644 (file)
 #include "fs_context.h"
 #include "cifs_unicode.h"
 
+struct dfs_root_ses {
+       struct list_head list;
+       struct cifs_ses *ses;
+};
+
 int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
                              struct smb3_fs_context *ctx);
 int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
@@ -22,9 +27,10 @@ static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path)
 static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path,
                                   struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl)
 {
+       struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
        struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
 
-       return dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+       return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
                              cifs_remap(cifs_sb), path, ref, tl);
 }
 
@@ -43,4 +49,15 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
                                                        true);
 }
 
+static inline void dfs_put_root_smb_sessions(struct list_head *head)
+{
+       struct dfs_root_ses *root, *tmp;
+
+       list_for_each_entry_safe(root, tmp, head, list) {
+               list_del_init(&root->list);
+               cifs_put_smb_ses(root->ses);
+               kfree(root);
+       }
+}
+
 #endif /* _CIFS_DFS_H */
index ac86bd0ebd637bc2d1440c3f2858489ed27b5364..30cbdf8514a5969c8a2b862277b0544405d2a151 100644 (file)
@@ -49,17 +49,6 @@ struct cache_entry {
        struct cache_dfs_tgt *tgthint;
 };
 
-/* List of referral server sessions per dfs mount */
-struct mount_group {
-       struct list_head list;
-       uuid_t id;
-       struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
-       int num_sessions;
-       spinlock_t lock;
-       struct list_head refresh_list;
-       struct kref refcount;
-};
-
 static struct kmem_cache *cache_slab __read_mostly;
 static struct workqueue_struct *dfscache_wq __read_mostly;
 
@@ -76,85 +65,10 @@ static atomic_t cache_count;
 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DECLARE_RWSEM(htable_rw_lock);
 
-static LIST_HEAD(mount_group_list);
-static DEFINE_MUTEX(mount_group_list_lock);
-
 static void refresh_cache_worker(struct work_struct *work);
 
 static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
 
-static void __mount_group_release(struct mount_group *mg)
-{
-       int i;
-
-       for (i = 0; i < mg->num_sessions; i++)
-               cifs_put_smb_ses(mg->sessions[i]);
-       kfree(mg);
-}
-
-static void mount_group_release(struct kref *kref)
-{
-       struct mount_group *mg = container_of(kref, struct mount_group, refcount);
-
-       mutex_lock(&mount_group_list_lock);
-       list_del(&mg->list);
-       mutex_unlock(&mount_group_list_lock);
-       __mount_group_release(mg);
-}
-
-static struct mount_group *find_mount_group_locked(const uuid_t *id)
-{
-       struct mount_group *mg;
-
-       list_for_each_entry(mg, &mount_group_list, list) {
-               if (uuid_equal(&mg->id, id))
-                       return mg;
-       }
-       return ERR_PTR(-ENOENT);
-}
-
-static struct mount_group *__get_mount_group_locked(const uuid_t *id)
-{
-       struct mount_group *mg;
-
-       mg = find_mount_group_locked(id);
-       if (!IS_ERR(mg))
-               return mg;
-
-       mg = kmalloc(sizeof(*mg), GFP_KERNEL);
-       if (!mg)
-               return ERR_PTR(-ENOMEM);
-       kref_init(&mg->refcount);
-       uuid_copy(&mg->id, id);
-       mg->num_sessions = 0;
-       spin_lock_init(&mg->lock);
-       list_add(&mg->list, &mount_group_list);
-       return mg;
-}
-
-static struct mount_group *get_mount_group(const uuid_t *id)
-{
-       struct mount_group *mg;
-
-       mutex_lock(&mount_group_list_lock);
-       mg = __get_mount_group_locked(id);
-       if (!IS_ERR(mg))
-               kref_get(&mg->refcount);
-       mutex_unlock(&mount_group_list_lock);
-
-       return mg;
-}
-
-static void free_mount_group_list(void)
-{
-       struct mount_group *mg, *tmp_mg;
-
-       list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
-               list_del_init(&mg->list);
-               __mount_group_release(mg);
-       }
-}
-
 /**
  * dfs_cache_canonical_path - get a canonical DFS path
  *
@@ -704,7 +618,6 @@ void dfs_cache_destroy(void)
 {
        cancel_delayed_work_sync(&refresh_task);
        unload_nls(cache_cp);
-       free_mount_group_list();
        flush_cache_ents();
        kmem_cache_destroy(cache_slab);
        destroy_workqueue(dfscache_wq);
@@ -1111,54 +1024,6 @@ out_unlock:
        return rc;
 }
 
-/**
- * dfs_cache_add_refsrv_session - add SMB session of referral server
- *
- * @mount_id: mount group uuid to lookup.
- * @ses: reference counted SMB session of referral server.
- */
-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
-{
-       struct mount_group *mg;
-
-       if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
-               return;
-
-       mg = get_mount_group(mount_id);
-       if (WARN_ON_ONCE(IS_ERR(mg)))
-               return;
-
-       spin_lock(&mg->lock);
-       if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
-               mg->sessions[mg->num_sessions++] = ses;
-       spin_unlock(&mg->lock);
-       kref_put(&mg->refcount, mount_group_release);
-}
-
-/**
- * dfs_cache_put_refsrv_sessions - put all referral server sessions
- *
- * Put all SMB sessions from the given mount group id.
- *
- * @mount_id: mount group uuid to lookup.
- */
-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
-{
-       struct mount_group *mg;
-
-       if (!mount_id || uuid_is_null(mount_id))
-               return;
-
-       mutex_lock(&mount_group_list_lock);
-       mg = find_mount_group_locked(mount_id);
-       if (IS_ERR(mg)) {
-               mutex_unlock(&mount_group_list_lock);
-               return;
-       }
-       mutex_unlock(&mount_group_list_lock);
-       kref_put(&mg->refcount, mount_group_release);
-}
-
 /* Extract share from DFS target and return a pointer to prefix path or NULL */
 static const char *parse_target_share(const char *target, char **share)
 {
@@ -1326,7 +1191,7 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
        }
 
        spin_lock(&ipc->tc_lock);
-       if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
+       if (ipc->status != TID_GOOD) {
                spin_unlock(&ipc->tc_lock);
                cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
                goto out;
@@ -1384,11 +1249,6 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
                cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
                return 0;
        }
-
-       if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
-               cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
-               return -EINVAL;
-       }
        /*
         * After reconnecting to a different server, unique ids won't match anymore, so we disable
         * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
index be3b5a44cf82711a575e8efe70934f7e07109131..e0d39393035a99086c875324d8440f8fe03436ca 100644 (file)
@@ -40,8 +40,6 @@ int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iter
                               struct dfs_info3_param *ref);
 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
                            char **prefix);
-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
 int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
 
index 4d4a2d82636d2a359f785c41c5efca5ba5aa8504..6831a9949c430a1d20d6ee1ab4e822254c92f02b 100644 (file)
@@ -174,13 +174,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        struct list_head *tmp1;
 
        /* only send once per connect */
-       spin_lock(&tcon->ses->ses_lock);
-       if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
-               spin_unlock(&tcon->ses->ses_lock);
+       spin_lock(&tcon->tc_lock);
+       if (tcon->status != TID_NEED_RECON) {
+               spin_unlock(&tcon->tc_lock);
                return;
        }
        tcon->status = TID_IN_FILES_INVALIDATE;
-       spin_unlock(&tcon->ses->ses_lock);
+       spin_unlock(&tcon->tc_lock);
 
        /* list all files open on tree connection and mark them invalid */
        spin_lock(&tcon->open_file_lock);
index 44cb5639ed3ba3502db464c887526c3c0b0ec58a..3de00e7127ec49624918aaff189544ebe99ecd87 100644 (file)
@@ -265,6 +265,7 @@ struct smb3_fs_context {
        bool rootfs:1; /* if it's a SMB root file system */
        bool witness:1; /* use witness protocol */
        char *leaf_fullpath;
+       struct cifs_ses *dfs_root_ses;
 };
 
 extern const struct fs_parameter_spec smb3_fs_parameters[];
@@ -285,5 +286,5 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
  * max deferred close timeout (jiffies) - 2^30
  */
 #define SMB3_MAX_DCLOSETIMEO (1 << 30)
-#define SMB3_DEF_DCLOSETIMEO (5 * HZ) /* Can increase later, other clients use larger */
+#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
 #endif
index 7d97c10f24535016a0ad447f1854274a7c853df7..c66be4904e1fa0db7277571e5163f0dfe6359b1b 100644 (file)
@@ -360,6 +360,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
                .cifs_sb = cifs_sb,
+               .path = path,
                .desired_access = GENERIC_READ,
                .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
                .disposition = FILE_OPEN,
@@ -427,6 +428,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
                .cifs_sb = cifs_sb,
+               .path = path,
                .desired_access = GENERIC_WRITE,
                .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
                .disposition = FILE_CREATE,
index a0d286ee723dda465cecbe9d9e0c4f20c3887e3e..b44fb51968bfb3dd66db8b773acda5a53e9db62e 100644 (file)
@@ -22,6 +22,7 @@
 #ifdef CONFIG_CIFS_DFS_UPCALL
 #include "dns_resolve.h"
 #include "dfs_cache.h"
+#include "dfs.h"
 #endif
 #include "fs_context.h"
 #include "cached_dir.h"
@@ -134,6 +135,9 @@ tconInfoAlloc(void)
        spin_lock_init(&ret_buf->stat_lock);
        atomic_set(&ret_buf->num_local_opens, 0);
        atomic_set(&ret_buf->num_remote_opens, 0);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#endif
 
        return ret_buf;
 }
@@ -149,6 +153,9 @@ tconInfoFree(struct cifs_tcon *tcon)
        atomic_dec(&tconInfoAllocCount);
        kfree(tcon->nativeFileSystem);
        kfree_sensitive(tcon->password);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
+#endif
        kfree(tcon);
 }
 
@@ -1255,6 +1262,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
                 * removing cached DFS targets that the client would eventually
                 * need during failover.
                 */
+               ses = CIFS_DFS_ROOT_SES(ses);
                if (ses->server->ops->get_dfs_refer &&
                    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
                                                     &num_refs, cifs_sb->local_nls,
index 9b956294e8643dfe57772699436ffab2bd0bcc85..163a03298430d08304647d3c137903f8afbef0bc 100644 (file)
@@ -107,6 +107,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 
        vars->oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = full_path,
                .desired_access = desired_access,
                .disposition = create_disposition,
                .create_options = cifs_create_options(cifs_sb, create_options),
@@ -234,15 +235,32 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
                size[0] = 8; /* sizeof __le64 */
                data[0] = ptr;
 
-               rc = SMB2_set_info_init(tcon, server,
-                                       &rqst[num_rqst], COMPOUND_FID,
-                                       COMPOUND_FID, current->tgid,
-                                       FILE_END_OF_FILE_INFORMATION,
-                                       SMB2_O_INFO_FILE, 0, data, size);
+               if (cfile) {
+                       rc = SMB2_set_info_init(tcon, server,
+                                               &rqst[num_rqst],
+                                               cfile->fid.persistent_fid,
+                                               cfile->fid.volatile_fid,
+                                               current->tgid,
+                                               FILE_END_OF_FILE_INFORMATION,
+                                               SMB2_O_INFO_FILE, 0,
+                                               data, size);
+               } else {
+                       rc = SMB2_set_info_init(tcon, server,
+                                               &rqst[num_rqst],
+                                               COMPOUND_FID,
+                                               COMPOUND_FID,
+                                               current->tgid,
+                                               FILE_END_OF_FILE_INFORMATION,
+                                               SMB2_O_INFO_FILE, 0,
+                                               data, size);
+                       if (!rc) {
+                               smb2_set_next_command(tcon, &rqst[num_rqst]);
+                               smb2_set_related(&rqst[num_rqst]);
+                       }
+               }
                if (rc)
                        goto finished;
-               smb2_set_next_command(tcon, &rqst[num_rqst]);
-               smb2_set_related(&rqst[num_rqst++]);
+               num_rqst++;
                trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
                break;
        case SMB2_OP_SET_INFO:
index 6dfb865ee9d75155f1421b0a41677e32da15eb98..a81758225fcdc7b24b8921bf37ef95c79fde2ba3 100644 (file)
@@ -530,6 +530,14 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
        p = buf;
 
        spin_lock(&ses->iface_lock);
+       /* do not query too frequently, this time with lock held */
+       if (ses->iface_last_update &&
+           time_before(jiffies, ses->iface_last_update +
+                       (SMB_INTERFACE_POLL_INTERVAL * HZ))) {
+               spin_unlock(&ses->iface_lock);
+               return 0;
+       }
+
        /*
         * Go through iface_list and do kref_put to remove
         * any unused ifaces. ifaces in use will be removed
@@ -696,6 +704,12 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
        struct network_interface_info_ioctl_rsp *out_buf = NULL;
        struct cifs_ses *ses = tcon->ses;
 
+       /* do not query too frequently */
+       if (ses->iface_last_update &&
+           time_before(jiffies, ses->iface_last_update +
+                       (SMB_INTERFACE_POLL_INTERVAL * HZ)))
+               return 0;
+
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO,
                        NULL /* no data input */, 0 /* no data input */,
@@ -703,7 +717,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
        if (rc == -EOPNOTSUPP) {
                cifs_dbg(FYI,
                         "server does not support query network interfaces\n");
-               goto out;
+               ret_data_len = 0;
        } else if (rc != 0) {
                cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
                goto out;
@@ -731,6 +745,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = "",
                .desired_access = FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -774,6 +789,7 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = "",
                .desired_access = FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -821,6 +837,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = full_path,
                .desired_access = FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -1105,6 +1122,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = path,
                .desired_access = FILE_WRITE_EA,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -2096,6 +2114,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
        tcon = cifs_sb_master_tcon(cifs_sb);
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = path,
                .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -2168,6 +2187,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = path,
                .desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -2500,6 +2520,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = path,
                .desired_access = desired_access,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -2634,6 +2655,7 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = "",
                .desired_access = FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, 0),
@@ -2928,6 +2950,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = full_path,
                .desired_access = FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, create_options),
@@ -3068,6 +3091,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = full_path,
                .desired_access = FILE_READ_ATTRIBUTES,
                .disposition = FILE_OPEN,
                .create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
@@ -3208,6 +3232,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
 
        oparms = (struct cifs_open_parms) {
                .tcon = tcon,
+               .path = path,
                .desired_access = READ_CONTROL,
                .disposition = FILE_OPEN,
                /*
index 0e53265e1462a350d5c8bbde41893b50f3dac3ae..6bd2aa6af18f36a7c00cb54966d32890d7ac63e9 100644 (file)
@@ -144,7 +144,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
               struct TCP_Server_Info *server)
 {
        int rc = 0;
-       struct nls_table *nls_codepage;
+       struct nls_table *nls_codepage = NULL;
        struct cifs_ses *ses;
 
        /*
@@ -165,13 +165,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
        spin_lock(&tcon->tc_lock);
        if (tcon->status == TID_EXITING) {
                /*
-                * only tree disconnect, open, and write,
-                * (and ulogoff which does not have tcon)
-                * are allowed as we start force umount.
+                * only tree disconnect allowed when disconnecting ...
                 */
-               if ((smb2_command != SMB2_WRITE) &&
-                  (smb2_command != SMB2_CREATE) &&
-                  (smb2_command != SMB2_TREE_DISCONNECT)) {
+               if (smb2_command != SMB2_TREE_DISCONNECT) {
                        spin_unlock(&tcon->tc_lock);
                        cifs_dbg(FYI, "can not send cmd %d while umounting\n",
                                 smb2_command);
@@ -203,6 +199,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
        }
        spin_unlock(&server->srv_lock);
 
+again:
        rc = cifs_wait_for_server_reconnect(server, tcon->retry);
        if (rc)
                return rc;
@@ -219,8 +216,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
                 tcon->ses->chans_need_reconnect,
                 tcon->need_reconnect);
 
-       nls_codepage = load_nls_default();
-
+       mutex_lock(&ses->session_mutex);
        /*
         * Recheck after acquire mutex. If another thread is negotiating
         * and the server never sends an answer the socket will be closed
@@ -229,28 +225,38 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsNeedReconnect) {
                spin_unlock(&server->srv_lock);
+               mutex_unlock(&ses->session_mutex);
+
+               if (tcon->retry)
+                       goto again;
+
                rc = -EHOSTDOWN;
                goto out;
        }
        spin_unlock(&server->srv_lock);
 
+       nls_codepage = load_nls_default();
+
        /*
         * need to prevent multiple threads trying to simultaneously
         * reconnect the same SMB session
         */
+       spin_lock(&ses->ses_lock);
        spin_lock(&ses->chan_lock);
-       if (!cifs_chan_needs_reconnect(ses, server)) {
+       if (!cifs_chan_needs_reconnect(ses, server) &&
+           ses->ses_status == SES_GOOD) {
                spin_unlock(&ses->chan_lock);
-
+               spin_unlock(&ses->ses_lock);
                /* this means that we only need to tree connect */
                if (tcon->need_reconnect)
                        goto skip_sess_setup;
 
+               mutex_unlock(&ses->session_mutex);
                goto out;
        }
        spin_unlock(&ses->chan_lock);
+       spin_unlock(&ses->ses_lock);
 
-       mutex_lock(&ses->session_mutex);
        rc = cifs_negotiate_protocol(0, ses, server);
        if (!rc) {
                rc = cifs_setup_session(0, ses, server, nls_codepage);
@@ -266,10 +272,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
                mutex_unlock(&ses->session_mutex);
                goto out;
        }
-       mutex_unlock(&ses->session_mutex);
 
 skip_sess_setup:
-       mutex_lock(&ses->session_mutex);
        if (!tcon->need_reconnect) {
                mutex_unlock(&ses->session_mutex);
                goto out;
@@ -284,7 +288,7 @@ skip_sess_setup:
        cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
        if (rc) {
                /* If sess reconnected but tcon didn't, something strange ... */
-               pr_warn_once("reconnect tcon failed rc = %d\n", rc);
+               cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
                goto out;
        }
 
@@ -1256,9 +1260,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
        if (rc)
                return rc;
 
-       spin_lock(&ses->chan_lock);
-       is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
-       spin_unlock(&ses->chan_lock);
+       spin_lock(&ses->ses_lock);
+       is_binding = (ses->ses_status == SES_GOOD);
+       spin_unlock(&ses->ses_lock);
 
        if (is_binding) {
                req->hdr.SessionId = cpu_to_le64(ses->Suid);
@@ -1416,9 +1420,9 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
                goto out_put_spnego_key;
        }
 
-       spin_lock(&ses->chan_lock);
-       is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
-       spin_unlock(&ses->chan_lock);
+       spin_lock(&ses->ses_lock);
+       is_binding = (ses->ses_status == SES_GOOD);
+       spin_unlock(&ses->ses_lock);
 
        /* keep session key if binding */
        if (!is_binding) {
@@ -1542,9 +1546,9 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
 
        cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
 
-       spin_lock(&ses->chan_lock);
-       is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
-       spin_unlock(&ses->chan_lock);
+       spin_lock(&ses->ses_lock);
+       is_binding = (ses->ses_status == SES_GOOD);
+       spin_unlock(&ses->ses_lock);
 
        /* keep existing ses id and flags if binding */
        if (!is_binding) {
@@ -1610,9 +1614,9 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
 
        rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
 
-       spin_lock(&ses->chan_lock);
-       is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
-       spin_unlock(&ses->chan_lock);
+       spin_lock(&ses->ses_lock);
+       is_binding = (ses->ses_status == SES_GOOD);
+       spin_unlock(&ses->ses_lock);
 
        /* keep existing ses id and flags if binding */
        if (!is_binding) {
@@ -2705,7 +2709,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
        rqst.rq_nvec = n_iov;
 
        /* no need to inc num_remote_opens because we close it just below */
-       trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
+       trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
                                    FILE_WRITE_ATTRIBUTES);
        /* resource #4: response buffer */
        rc = cifs_send_recv(xid, ses, server,
@@ -2973,7 +2977,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        if (rc)
                goto creat_exit;
 
-       trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
+       trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
                oparms->create_options, oparms->desired_access);
 
        rc = cifs_send_recv(xid, ses, server,
index 381babc1212c9e8a95911e6382e5ba4ff5c3cc80..790acf65a0926cbe3316d591a3bc7a1b97120974 100644 (file)
@@ -81,6 +81,7 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
        struct cifs_ses *ses = NULL;
        int i;
        int rc = 0;
+       bool is_binding = false;
 
        spin_lock(&cifs_tcp_ses_lock);
 
@@ -97,9 +98,12 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
        goto out;
 
 found:
+       spin_lock(&ses->ses_lock);
        spin_lock(&ses->chan_lock);
-       if (cifs_chan_needs_reconnect(ses, server) &&
-           !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+
+       is_binding = (cifs_chan_needs_reconnect(ses, server) &&
+                     ses->ses_status == SES_GOOD);
+       if (is_binding) {
                /*
                 * If we are in the process of binding a new channel
                 * to an existing session, use the master connection
@@ -107,6 +111,7 @@ found:
                 */
                memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
                spin_unlock(&ses->chan_lock);
+               spin_unlock(&ses->ses_lock);
                goto out;
        }
 
@@ -119,10 +124,12 @@ found:
                if (chan->server == server) {
                        memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
                        spin_unlock(&ses->chan_lock);
+                       spin_unlock(&ses->ses_lock);
                        goto out;
                }
        }
        spin_unlock(&ses->chan_lock);
+       spin_unlock(&ses->ses_lock);
 
        cifs_dbg(VFS,
                 "%s: Could not find channel signing key for session 0x%llx\n",
@@ -392,11 +399,15 @@ generate_smb3signingkey(struct cifs_ses *ses,
        bool is_binding = false;
        int chan_index = 0;
 
+       spin_lock(&ses->ses_lock);
        spin_lock(&ses->chan_lock);
-       is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+       is_binding = (cifs_chan_needs_reconnect(ses, server) &&
+                     ses->ses_status == SES_GOOD);
+
        chan_index = cifs_ses_get_chan_index(ses, server);
        /* TODO: introduce ref counting for channels when the can be freed */
        spin_unlock(&ses->chan_lock);
+       spin_unlock(&ses->ses_lock);
 
        /*
         * All channels use the same encryption/decryption keys but
@@ -425,7 +436,7 @@ generate_smb3signingkey(struct cifs_ses *ses,
 
                /* safe to access primary channel, since it will never go away */
                spin_lock(&ses->chan_lock);
-               memcpy(ses->chans[0].signkey, ses->smb3signingkey,
+               memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey,
                       SMB3_SIGN_KEY_SIZE);
                spin_unlock(&ses->chan_lock);
 
index 110070ba8b04e3b3e4aa2315d1cfce1c933ea809..d3053bd8ae7312656b04fe97063f50c7c8938a24 100644 (file)
@@ -701,13 +701,15 @@ DECLARE_EVENT_CLASS(smb3_open_enter_class,
        TP_PROTO(unsigned int xid,
                __u32   tid,
                __u64   sesid,
+               const char *full_path,
                int     create_options,
                int     desired_access),
-       TP_ARGS(xid, tid, sesid, create_options, desired_access),
+       TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access),
        TP_STRUCT__entry(
                __field(unsigned int, xid)
                __field(__u32, tid)
                __field(__u64, sesid)
+               __string(path, full_path)
                __field(int, create_options)
                __field(int, desired_access)
        ),
@@ -715,11 +717,12 @@ DECLARE_EVENT_CLASS(smb3_open_enter_class,
                __entry->xid = xid;
                __entry->tid = tid;
                __entry->sesid = sesid;
+               __assign_str(path, full_path);
                __entry->create_options = create_options;
                __entry->desired_access = desired_access;
        ),
-       TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x",
-               __entry->xid, __entry->sesid, __entry->tid,
+       TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s cr_opts=0x%x des_access=0x%x",
+               __entry->xid, __entry->sesid, __entry->tid, __get_str(path),
                __entry->create_options, __entry->desired_access)
 )
 
@@ -728,9 +731,10 @@ DEFINE_EVENT(smb3_open_enter_class, smb3_##name,  \
        TP_PROTO(unsigned int xid,              \
                __u32   tid,                    \
                __u64   sesid,                  \
+               const char *full_path,          \
                int     create_options,         \
                int     desired_access),        \
-       TP_ARGS(xid, tid, sesid, create_options, desired_access))
+       TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access))
 
 DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
 DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
index b42050c68e6c95a1420dfc3e5ccecff75dee6cd6..24bdd5f4d3bcc725a9763266270a790f03d00a49 100644 (file)
@@ -278,7 +278,7 @@ static int
 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
 {
-       int rc = 0;
+       int rc;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
@@ -289,6 +289,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        struct msghdr smb_msg = {};
        __be32 rfc1002_marker;
 
+       cifs_in_send_inc(server);
        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
@@ -297,14 +298,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                goto smbd_done;
        }
 
+       rc = -EAGAIN;
        if (ssocket == NULL)
-               return -EAGAIN;
+               goto out;
 
+       rc = -ERESTARTSYS;
        if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
-               return -ERESTARTSYS;
+               goto out;
        }
 
+       rc = 0;
        /* cork the socket */
        tcp_sock_set_cork(ssocket->sk, true);
 
@@ -407,7 +411,8 @@ smbd_done:
                         rc);
        else if (rc > 0)
                rc = 0;
-
+out:
+       cifs_in_send_dec(server);
        return rc;
 }
 
@@ -826,9 +831,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
-       cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
-       cifs_in_send_dec(server);
 
        if (rc < 0) {
                revert_current_mid(server, mid->credits);
@@ -1144,9 +1147,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
-       cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
-       cifs_in_send_dec(server);
 
        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);
@@ -1396,9 +1397,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
 
        midQ->mid_state = MID_REQUEST_SUBMITTED;
 
-       cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
-       cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);
 
        if (rc < 0)
@@ -1539,9 +1538,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
        }
 
        midQ->mid_state = MID_REQUEST_SUBMITTED;
-       cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
-       cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);
 
        if (rc < 0)
index 78086f8dbda5261f6b36a9fe8523debe81c97faa..13d336a6cc5da5675fa322d3d793c13a56291848 100644 (file)
@@ -92,6 +92,8 @@ void fscrypt_put_master_key_activeref(struct super_block *sb,
         * destroying any subkeys embedded in it.
         */
 
+       if (WARN_ON(!sb->s_master_keys))
+               return;
        spin_lock(&sb->s_master_keys->lock);
        hlist_del_rcu(&mk->mk_node);
        spin_unlock(&sb->s_master_keys->lock);
@@ -207,10 +209,11 @@ static int allocate_filesystem_keyring(struct super_block *sb)
  * Release all encryption keys that have been added to the filesystem, along
  * with the keyring that contains them.
  *
- * This is called at unmount time.  The filesystem's underlying block device(s)
- * are still available at this time; this is important because after user file
- * accesses have been allowed, this function may need to evict keys from the
- * keyslots of an inline crypto engine, which requires the block device(s).
+ * This is called at unmount time, after all potentially-encrypted inodes have
+ * been evicted.  The filesystem's underlying block device(s) are still
+ * available at this time; this is important because after user file accesses
+ * have been allowed, this function may need to evict keys from the keyslots of
+ * an inline crypto engine, which requires the block device(s).
  */
 void fscrypt_destroy_keyring(struct super_block *sb)
 {
@@ -227,12 +230,12 @@ void fscrypt_destroy_keyring(struct super_block *sb)
 
                hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) {
                        /*
-                        * Since all inodes were already evicted, every key
-                        * remaining in the keyring should have an empty inode
-                        * list, and should only still be in the keyring due to
-                        * the single active ref associated with ->mk_secret.
-                        * There should be no structural refs beyond the one
-                        * associated with the active ref.
+                        * Since all potentially-encrypted inodes were already
+                        * evicted, every key remaining in the keyring should
+                        * have an empty inode list, and should only still be in
+                        * the keyring due to the single active ref associated
+                        * with ->mk_secret.  There should be no structural refs
+                        * beyond the one associated with the active ref.
                         */
                        WARN_ON(refcount_read(&mk->mk_active_refs) != 1);
                        WARN_ON(refcount_read(&mk->mk_struct_refs) != 1);
index 31e21de56432d194e1e95eaa5d326e32c0cbcdc2..a5010b5b8a8c11b5909c137b649fe5d04849d832 100644 (file)
@@ -3884,10 +3884,8 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
                                goto end_rename;
                }
                retval = ext4_rename_dir_prepare(handle, &old);
-               if (retval) {
-                       inode_unlock(old.inode);
+               if (retval)
                        goto end_rename;
-               }
        }
        /*
         * If we're renaming a file within an inline_data dir and adding or
index 6fe9ca253b709e8b8c193ab9a038206cd1f6c069..2e215e8c3c88e57d6ed17ba6cc5cb22420e99af6 100644 (file)
@@ -83,8 +83,26 @@ static int gfs2_dhash(const struct dentry *dentry, struct qstr *str)
        return 0;
 }
 
+static int gfs2_dentry_delete(const struct dentry *dentry)
+{
+       struct gfs2_inode *ginode;
+
+       if (d_really_is_negative(dentry))
+               return 0;
+
+       ginode = GFS2_I(d_inode(dentry));
+       if (!gfs2_holder_initialized(&ginode->i_iopen_gh))
+               return 0;
+
+       if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))
+               return 1;
+
+       return 0;
+}
+
 const struct dentry_operations gfs2_dops = {
        .d_revalidate = gfs2_drevalidate,
        .d_hash = gfs2_dhash,
+       .d_delete = gfs2_dentry_delete,
 };
 
index 6e61b5bc7d86ed5add4e940a3972068a508c52a0..cead696b656a8cd2a39a3fa4598dd656f39515b0 100644 (file)
@@ -727,8 +727,9 @@ static int generate_key(struct ksmbd_conn *conn, struct ksmbd_session *sess,
                goto smb3signkey_ret;
        }
 
-       if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
-           conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+       if (key_size == SMB3_ENC_DEC_KEY_SIZE &&
+           (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+            conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
                rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
        else
                rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
index 5b10b03800c163fb7704ef3d56ba7b664874d932..115a67d2cf7850f83a6102f52d35f76e0014ed47 100644 (file)
@@ -298,7 +298,7 @@ int ksmbd_conn_handler_loop(void *p)
                kvfree(conn->request_buf);
                conn->request_buf = NULL;
 
-               size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
+               size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
                if (size != sizeof(hdr_buf))
                        break;
 
@@ -319,13 +319,10 @@ int ksmbd_conn_handler_loop(void *p)
                }
 
                /*
-                * Check if pdu size is valid (min : smb header size,
-                * max : 0x00FFFFFF).
+                * Check maximum pdu size(0x00FFFFFF).
                 */
-               if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
-                   pdu_size > MAX_STREAM_PROT_LEN) {
+               if (pdu_size > MAX_STREAM_PROT_LEN)
                        break;
-               }
 
                /* 4 for rfc1002 length field */
                size = pdu_size + 4;
@@ -344,7 +341,7 @@ int ksmbd_conn_handler_loop(void *p)
                 * We already read 4 bytes to find out PDU size, now
                 * read in PDU
                 */
-               size = t->ops->read(t, conn->request_buf + 4, pdu_size);
+               size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
                if (size < 0) {
                        pr_err("sock_read failed: %d\n", size);
                        break;
index 3643354a3fa79c2b351c581691eff7ee74fe761b..0e3a848defaf3214ed752deed6c51854890a4194 100644 (file)
@@ -114,7 +114,8 @@ struct ksmbd_transport_ops {
        int (*prepare)(struct ksmbd_transport *t);
        void (*disconnect)(struct ksmbd_transport *t);
        void (*shutdown)(struct ksmbd_transport *t);
-       int (*read)(struct ksmbd_transport *t, char *buf, unsigned int size);
+       int (*read)(struct ksmbd_transport *t, char *buf,
+                   unsigned int size, int max_retries);
        int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
                      int size, bool need_invalidate_rkey,
                      unsigned int remote_key);
index 0685c1c77b9fc2b443866480ed7a7ae89dad7c18..97c9d1b5bcc0bcd2a6fbe97719b22820c47ac057 100644 (file)
@@ -2977,8 +2977,11 @@ int smb2_open(struct ksmbd_work *work)
                                                        sizeof(struct smb_acl) +
                                                        sizeof(struct smb_ace) * ace_num * 2,
                                                        GFP_KERNEL);
-                                       if (!pntsd)
+                                       if (!pntsd) {
+                                               posix_acl_release(fattr.cf_acls);
+                                               posix_acl_release(fattr.cf_dacls);
                                                goto err_out;
+                                       }
 
                                        rc = build_sec_desc(idmap,
                                                            pntsd, NULL, 0,
@@ -4934,6 +4937,10 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
 
                info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);
 
+               if (test_share_config_flag(work->tcon->share_conf,
+                   KSMBD_SHARE_FLAG_STREAMS))
+                       info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);
+
                info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
                len = smbConvertToUTF16((__le16 *)info->FileSystemName,
                                        "NTFS", PATH_MAX, conn->local_nls, 0);
@@ -7444,13 +7451,16 @@ static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
        if (in_count == 0)
                return -EINVAL;
 
+       start = le64_to_cpu(qar_req->file_offset);
+       length = le64_to_cpu(qar_req->length);
+
+       if (start < 0 || length < 0)
+               return -EINVAL;
+
        fp = ksmbd_lookup_fd_fast(work, id);
        if (!fp)
                return -ENOENT;
 
-       start = le64_to_cpu(qar_req->file_offset);
-       length = le64_to_cpu(qar_req->length);
-
        ret = ksmbd_vfs_fqar_lseek(fp, start, length,
                                   qar_rsp, in_count, out_count);
        if (ret && ret != -E2BIG)
@@ -7751,7 +7761,7 @@ int smb2_ioctl(struct ksmbd_work *work)
 
                off = le64_to_cpu(zero_data->FileOffset);
                bfz = le64_to_cpu(zero_data->BeyondFinalZero);
-               if (off > bfz) {
+               if (off < 0 || bfz < 0 || off > bfz) {
                        ret = -EINVAL;
                        goto out;
                }
index fa2b54df6ee6ff2aad4da4f3090010b26a863c63..9c1ce6d199ce4a46ca9129f69d4715496779450c 100644 (file)
@@ -434,7 +434,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
 
 static int __smb2_negotiate(struct ksmbd_conn *conn)
 {
-       return (conn->dialect >= SMB21_PROT_ID &&
+       return (conn->dialect >= SMB20_PROT_ID &&
                conn->dialect <= SMB311_PROT_ID);
 }
 
@@ -442,9 +442,26 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
 {
        struct smb_negotiate_rsp *neg_rsp = work->response_buf;
 
-       ksmbd_debug(SMB, "Unsupported SMB protocol\n");
-       neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
-       return -EINVAL;
+       ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
+
+       /*
+        * Remove 4 byte direct TCP header, add 2 byte bcc and
+        * 2 byte DialectIndex.
+        */
+       *(__be32 *)work->response_buf =
+               cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
+       neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+       neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
+       *(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
+       neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
+       neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+               SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+
+       neg_rsp->hdr.WordCount = 1;
+       neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
+       neg_rsp->ByteCount = 0;
+       return 0;
 }
 
 int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
@@ -465,7 +482,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
                }
        }
 
-       if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
+       if (command == SMB2_NEGOTIATE_HE) {
                ret = smb2_handle_negotiate(work);
                init_smb2_neg_rsp(work);
                return ret;
index e663ab9ea759092d9e1cb0282b76d0059490581c..d30ce4c1a151703d9f4f512882ee5995a8f93773 100644 (file)
 
 #define SMB1_PROTO_NUMBER              cpu_to_le32(0x424d53ff)
 #define SMB_COM_NEGOTIATE              0x72
-
 #define SMB1_CLIENT_GUID_SIZE          (16)
+
+#define SMBFLG_RESPONSE 0x80   /* this PDU is a response from server */
+
+#define SMBFLG2_IS_LONG_NAME   cpu_to_le16(0x40)
+#define SMBFLG2_EXT_SEC                cpu_to_le16(0x800)
+#define SMBFLG2_ERR_STATUS     cpu_to_le16(0x4000)
+#define SMBFLG2_UNICODE                cpu_to_le16(0x8000)
+
 struct smb_hdr {
        __be32 smb_buf_length;
        __u8 Protocol[4];
@@ -199,28 +206,7 @@ struct smb_negotiate_req {
 struct smb_negotiate_rsp {
        struct smb_hdr hdr;     /* wct = 17 */
        __le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
-       __u8 SecurityMode;
-       __le16 MaxMpxCount;
-       __le16 MaxNumberVcs;
-       __le32 MaxBufferSize;
-       __le32 MaxRawSize;
-       __le32 SessionKey;
-       __le32 Capabilities;    /* see below */
-       __le32 SystemTimeLow;
-       __le32 SystemTimeHigh;
-       __le16 ServerTimeZone;
-       __u8 EncryptionKeyLength;
        __le16 ByteCount;
-       union {
-               unsigned char EncryptionKey[8]; /* cap extended security off */
-               /* followed by Domain name - if extended security is off */
-               /* followed by 16 bytes of server GUID */
-               /* then security blob if cap_extended_security negotiated */
-               struct {
-                       unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
-                       unsigned char SecurityBlob[1];
-               } __packed extended_response;
-       } __packed u;
 } __packed;
 
 struct filesystem_attribute_info {
index 096eda9ef873b670b46e735178bccb15742c4268..c06efc020bd95475cfd83884fe79b11638c57f5a 100644 (file)
@@ -670,7 +670,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
 }
 
 static int smb_direct_read(struct ksmbd_transport *t, char *buf,
-                          unsigned int size)
+                          unsigned int size, int unused)
 {
        struct smb_direct_recvmsg *recvmsg;
        struct smb_direct_data_transfer *data_transfer;
index 603893fd87f57d632107258052df2b79407db3ae..20e85e2701f26cf38daac453a41cc5eee2c14245 100644 (file)
@@ -291,16 +291,18 @@ static int ksmbd_tcp_run_kthread(struct interface *iface)
 
 /**
  * ksmbd_tcp_readv() - read data from socket in given iovec
- * @t:         TCP transport instance
- * @iov_orig:  base IO vector
- * @nr_segs:   number of segments in base iov
- * @to_read:   number of bytes to read from socket
+ * @t:                 TCP transport instance
+ * @iov_orig:          base IO vector
+ * @nr_segs:           number of segments in base iov
+ * @to_read:           number of bytes to read from socket
+ * @max_retries:       maximum retry count
  *
  * Return:     on success return number of bytes read from socket,
  *             otherwise return error number
  */
 static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
-                          unsigned int nr_segs, unsigned int to_read)
+                          unsigned int nr_segs, unsigned int to_read,
+                          int max_retries)
 {
        int length = 0;
        int total_read;
@@ -308,7 +310,6 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
        struct msghdr ksmbd_msg;
        struct kvec *iov;
        struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
-       int max_retry = 2;
 
        iov = get_conn_iovec(t, nr_segs);
        if (!iov)
@@ -335,14 +336,23 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
                } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
                        total_read = -EAGAIN;
                        break;
-               } else if ((length == -ERESTARTSYS || length == -EAGAIN) &&
-                          max_retry) {
+               } else if (length == -ERESTARTSYS || length == -EAGAIN) {
+                       /*
+                        * If max_retries is negative, allow unlimited
+                        * retries to keep connection with inactive sessions.
+                        */
+                       if (max_retries == 0) {
+                               total_read = length;
+                               break;
+                       } else if (max_retries > 0) {
+                               max_retries--;
+                       }
+
                        usleep_range(1000, 2000);
                        length = 0;
-                       max_retry--;
                        continue;
                } else if (length <= 0) {
-                       total_read = -EAGAIN;
+                       total_read = length;
                        break;
                }
        }
@@ -358,14 +368,15 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
  * Return:     on success return number of bytes read from socket,
  *             otherwise return error number
  */
-static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf, unsigned int to_read)
+static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf,
+                         unsigned int to_read, int max_retries)
 {
        struct kvec iov;
 
        iov.iov_base = buf;
        iov.iov_len = to_read;
 
-       return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read);
+       return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read, max_retries);
 }
 
 static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
index 7df6324ccb8ab33ac9e04de7cbcfb11bb4eb1413..8161667c976f8c487de84ba9fcba189ae29072d9 100644 (file)
@@ -261,7 +261,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
        u32 exclusive;
        int error;
        __be32 *p;
-       s32 end;
 
        memset(lock, 0, sizeof(*lock));
        locks_init_lock(fl);
@@ -285,13 +284,7 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
        fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
        p = xdr_decode_hyper(p, &l_offset);
        xdr_decode_hyper(p, &l_len);
-       end = l_offset + l_len - 1;
-
-       fl->fl_start = (loff_t)l_offset;
-       if (l_len == 0 || end < 0)
-               fl->fl_end = OFFSET_MAX;
-       else
-               fl->fl_end = (loff_t)end;
+       nlm4svc_set_file_lock_range(fl, l_offset, l_len);
        error = 0;
 out:
        return error;
index 712fdfeb8ef063bd1ad12f80c3c2ddf30f75101e..5fcbf30cd275928d1d364cf63e4f39fb61cb2a5a 100644 (file)
@@ -33,6 +33,17 @@ loff_t_to_s64(loff_t offset)
        return res;
 }
 
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len)
+{
+       s64 end = off + len - 1;
+
+       fl->fl_start = off;
+       if (len == 0 || end < 0)
+               fl->fl_end = OFFSET_MAX;
+       else
+               fl->fl_end = end;
+}
+
 /*
  * NLM file handles are defined by specification to be a variable-length
  * XDR opaque no longer than 1024 bytes. However, this implementation
@@ -80,7 +91,7 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
        locks_init_lock(fl);
        fl->fl_flags = FL_POSIX;
        fl->fl_type  = F_RDLCK;
-
+       nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);
        return true;
 }
 
index 14a72224b6571b9617d586f766e39dcb5f1772eb..450d6c3bc05e27dd6370d45f6137a9bc89a4874c 100644 (file)
@@ -75,7 +75,7 @@ config NFS_V3_ACL
 config NFS_V4
        tristate "NFS client support for NFS version 4"
        depends on NFS_FS
-       select SUNRPC_GSS
+       select RPCSEC_GSS_KRB5
        select KEYS
        help
          This option enables support for version 4 of the NFS protocol
index a41c3ee4549c074fcdcbb91cbcfce463803fb78b..6fbcbb8d6587a2754b0d7283abf2a66ba4211056 100644 (file)
@@ -3089,7 +3089,6 @@ static void nfs_access_add_rbtree(struct inode *inode,
                else
                        goto found;
        }
-       set->timestamp = ktime_get_ns();
        rb_link_node(&set->rb_node, parent, p);
        rb_insert_color(&set->rb_node, root_node);
        list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
@@ -3114,6 +3113,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set,
        cache->fsgid = cred->fsgid;
        cache->group_info = get_group_info(cred->group_info);
        cache->mask = set->mask;
+       cache->timestamp = ktime_get_ns();
 
        /* The above field assignments must be visible
         * before this item appears on the lru.  We cannot easily
index c380cff4108e0693bd04580c18b2da9747f486da..e90988591df4f253f130d5d10eb4a60e5d9843da 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/stat.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/pagemap.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/nfs_fs.h>
@@ -337,6 +338,7 @@ int nfs_read_folio(struct file *file, struct folio *folio)
 
        trace_nfs_aop_readpage(inode, folio);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+       task_io_account_read(folio_size(folio));
 
        /*
         * Try to flush any pending writes to the file..
@@ -393,6 +395,7 @@ void nfs_readahead(struct readahead_control *ractl)
 
        trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
+       task_io_account_read(readahead_length(ractl));
 
        ret = -ESTALE;
        if (NFS_STALE(inode))
index 7c441f2bd4440c17bd1e5c59d172ff4595c8cb31..43b88eaf0673ab0bfda775646b40835d264e3748 100644 (file)
@@ -73,7 +73,7 @@ config NFSD_V4
        bool "NFS server support for NFS version 4"
        depends on NFSD && PROC_FS
        select FS_POSIX_ACL
-       select SUNRPC_GSS
+       select RPCSEC_GSS_KRB5
        select CRYPTO
        select CRYPTO_MD5
        select CRYPTO_SHA256
index 502e1b7742dbf604e8a30f502f34292fb38ed1cc..5783209f17fc522a4c211149d5e55cd6184ff594 100644 (file)
@@ -941,8 +941,15 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
        struct page *last_page;
 
        last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
-       for (page += offset / PAGE_SIZE; page <= last_page; page++)
+       for (page += offset / PAGE_SIZE; page <= last_page; page++) {
+               /*
+                * Skip page replacement when extending the contents
+                * of the current page.
+                */
+               if (page == *(rqstp->rq_next_page - 1))
+                       continue;
                svc_rqst_replace_page(rqstp, page);
+       }
        if (rqstp->rq_res.page_len == 0)        // first call
                rqstp->rq_res.page_base = offset % PAGE_SIZE;
        rqstp->rq_res.page_len += sd->len;
index 5ccc638ae92f7d2bf7cc7f3f9cad79e56dd20b2e..1dfbc0c34513fb022ff6b2c55f817cddea410dde 100644 (file)
@@ -71,7 +71,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
        if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
                return -EINVAL;
 
-       buf = (void *)__get_free_pages(GFP_NOFS, 0);
+       buf = (void *)get_zeroed_page(GFP_NOFS);
        if (unlikely(!buf))
                return -ENOMEM;
        maxmembs = PAGE_SIZE / argv->v_size;
index 1d65f6ef00ca8ba7a42eae4c3e87ff885201c43b..0394505fdce3fa0a70161c48aa0f28ce4128ef43 100644 (file)
@@ -1977,11 +1977,26 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
        }
 
        if (unlikely(copied < len) && wc->w_target_page) {
+               loff_t new_isize;
+
                if (!PageUptodate(wc->w_target_page))
                        copied = 0;
 
-               ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
-                                      start+len);
+               new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
+               if (new_isize > page_offset(wc->w_target_page))
+                       ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+                                              start+len);
+               else {
+                       /*
+                        * When page is fully beyond new isize (data copy
+                        * failed), do not bother zeroing the page. Invalidate
+                        * it instead so that writeback does not get confused
+                        * and put page & buffer dirty bits into an
+                        * inconsistent state.
+                        */
+                       block_invalidate_folio(page_folio(wc->w_target_page),
+                                               0, PAGE_SIZE);
+               }
        }
        if (wc->w_target_page)
                flush_dcache_page(wc->w_target_page);
index 84332d5cb817abbec2e206f7d25ce5db134a8fc8..04bc62ab7dfea9dde08c407fd4b5c04a7cbaedb0 100644 (file)
@@ -475,13 +475,22 @@ void generic_shutdown_super(struct super_block *sb)
 
                cgroup_writeback_umount();
 
-               /* evict all inodes with zero refcount */
+               /* Evict all inodes with zero refcount. */
                evict_inodes(sb);
-               /* only nonzero refcount inodes can have marks */
+
+               /*
+                * Clean up and evict any inodes that still have references due
+                * to fsnotify or the security policy.
+                */
                fsnotify_sb_delete(sb);
-               fscrypt_destroy_keyring(sb);
                security_sb_delete(sb);
 
+               /*
+                * Now that all potentially-encrypted inodes have been evicted,
+                * the fscrypt keyring can be destroyed.
+                */
+               fscrypt_destroy_keyring(sb);
+
                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
index e13db6507b38b64b3c7e3acaff729831fc3daab2..7a0e3a84d370bc1d3a320762c7781fd4f752ec14 100644 (file)
@@ -8,7 +8,6 @@
 #include "fsverity_private.h"
 
 #include <linux/mount.h>
-#include <linux/pagemap.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
 
@@ -367,25 +366,27 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
                goto out_drop_write;
 
        err = enable_verity(filp, &arg);
-       if (err)
-               goto out_allow_write_access;
 
        /*
-        * Some pages of the file may have been evicted from pagecache after
-        * being used in the Merkle tree construction, then read into pagecache
-        * again by another process reading from the file concurrently.  Since
-        * these pages didn't undergo verification against the file digest which
-        * fs-verity now claims to be enforcing, we have to wipe the pagecache
-        * to ensure that all future reads are verified.
+        * We no longer drop the inode's pagecache after enabling verity.  This
+        * used to be done to try to avoid a race condition where pages could be
+        * evicted after being used in the Merkle tree construction, then
+        * re-instantiated by a concurrent read.  Such pages are unverified, and
+        * the backing storage could have filled them with different content, so
+        * they shouldn't be used to fulfill reads once verity is enabled.
+        *
+        * But, dropping the pagecache has a big performance impact, and it
+        * doesn't fully solve the race condition anyway.  So for those reasons,
+        * and also because this race condition isn't very important relatively
+        * speaking (especially for small-ish files, where the chance of a page
+        * being used, evicted, *and* re-instantiated all while enabling verity
+        * is quite small), we no longer drop the inode's pagecache.
         */
-       filemap_write_and_wait(inode->i_mapping);
-       invalidate_inode_pages2(inode->i_mapping);
 
        /*
         * allow_write_access() is needed to pair with deny_write_access().
         * Regardless, the filesystem won't allow writing to verity files.
         */
-out_allow_write_access:
        allow_write_access(filp);
 out_drop_write:
        mnt_drop_write_file(filp);
index f50e3b5b52c932eb3635a9fc146ba3aad6126270..e2508222750b35eabe154cf1d5609f95680bcc69 100644 (file)
@@ -387,15 +387,15 @@ EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
 int __init fsverity_init_workqueue(void)
 {
        /*
-        * Use an unbound workqueue to allow bios to be verified in parallel
-        * even when they happen to complete on the same CPU.  This sacrifices
-        * locality, but it's worthwhile since hashing is CPU-intensive.
+        * Use a high-priority workqueue to prioritize verification work, which
+        * blocks reads from completing, over regular application tasks.
         *
-        * Also use a high-priority workqueue to prioritize verification work,
-        * which blocks reads from completing, over regular application tasks.
+        * For performance reasons, don't use an unbound workqueue.  Using an
+        * unbound workqueue for crypto operations causes excessive scheduler
+        * latency on ARM64.
         */
        fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
-                                                 WQ_UNBOUND | WQ_HIGHPRI,
+                                                 WQ_HIGHPRI,
                                                  num_online_cpus());
        if (!fsverity_read_workqueue)
                return -ENOMEM;
index 03135a1c31b673fa435ed3853e83b166eb5e6b29..92d88dc3c9f71fad2424ddfba6692234ff57e16b 100644 (file)
@@ -63,6 +63,7 @@ xfs-y                         += xfs_aops.o \
                                   xfs_bmap_util.o \
                                   xfs_bio_io.o \
                                   xfs_buf.o \
+                                  xfs_dahash_test.o \
                                   xfs_dir2_readdir.o \
                                   xfs_discard.o \
                                   xfs_error.o \
index 6a037173d20d99e60239ad51e15c2a66822ae5df..203f16c48c19902e607b77250d41114d67205b97 100644 (file)
@@ -3045,6 +3045,8 @@ xfs_alloc_read_agf(
                pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
                if (xfs_agfl_needs_reset(pag->pag_mount, agf))
                        set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
+               else
+                       clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate);
 
                /*
                 * Update the in-core allocbt counter. Filter out the rmapbt
@@ -3255,6 +3257,8 @@ xfs_alloc_vextent_finish(
        XFS_STATS_INC(mp, xs_allocx);
        XFS_STATS_ADD(mp, xs_allocb, args->len);
 
+       trace_xfs_alloc_vextent_finish(args);
+
 out_drop_perag:
        if (drop_perag && args->pag) {
                xfs_perag_rele(args->pag);
@@ -3279,8 +3283,14 @@ xfs_alloc_vextent_this_ag(
        xfs_agnumber_t          minimum_agno;
        int                     error;
 
+       ASSERT(args->pag != NULL);
+       ASSERT(args->pag->pag_agno == agno);
+
        args->agno = agno;
        args->agbno = 0;
+
+       trace_xfs_alloc_vextent_this_ag(args);
+
        error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
                        &minimum_agno);
        if (error) {
@@ -3323,11 +3333,14 @@ xfs_alloc_vextent_iterate_ags(
        uint32_t                flags)
 {
        struct xfs_mount        *mp = args->mp;
+       xfs_agnumber_t          restart_agno = minimum_agno;
        xfs_agnumber_t          agno;
        int                     error = 0;
 
+       if (flags & XFS_ALLOC_FLAG_TRYLOCK)
+               restart_agno = 0;
 restart:
-       for_each_perag_wrap_range(mp, start_agno, minimum_agno,
+       for_each_perag_wrap_range(mp, start_agno, restart_agno,
                        mp->m_sb.sb_agcount, agno, args->pag) {
                args->agno = agno;
                error = xfs_alloc_vextent_prepare_ag(args);
@@ -3366,6 +3379,7 @@ restart:
         */
        if (flags) {
                flags = 0;
+               restart_agno = minimum_agno;
                goto restart;
        }
 
@@ -3394,8 +3408,13 @@ xfs_alloc_vextent_start_ag(
        bool                    bump_rotor = false;
        int                     error;
 
+       ASSERT(args->pag == NULL);
+
        args->agno = NULLAGNUMBER;
        args->agbno = NULLAGBLOCK;
+
+       trace_xfs_alloc_vextent_start_ag(args);
+
        error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
        if (error) {
                if (error == -ENOSPC)
@@ -3442,8 +3461,13 @@ xfs_alloc_vextent_first_ag(
        xfs_agnumber_t          start_agno;
        int                     error;
 
+       ASSERT(args->pag == NULL);
+
        args->agno = NULLAGNUMBER;
        args->agbno = NULLAGBLOCK;
+
+       trace_xfs_alloc_vextent_first_ag(args);
+
        error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
        if (error) {
                if (error == -ENOSPC)
@@ -3470,8 +3494,14 @@ xfs_alloc_vextent_exact_bno(
        xfs_agnumber_t          minimum_agno;
        int                     error;
 
+       ASSERT(args->pag != NULL);
+       ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
+
        args->agno = XFS_FSB_TO_AGNO(mp, target);
        args->agbno = XFS_FSB_TO_AGBNO(mp, target);
+
+       trace_xfs_alloc_vextent_exact_bno(args);
+
        error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
        if (error) {
                if (error == -ENOSPC)
@@ -3502,8 +3532,14 @@ xfs_alloc_vextent_near_bno(
        bool                    needs_perag = args->pag == NULL;
        int                     error;
 
+       if (!needs_perag)
+               ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
+
        args->agno = XFS_FSB_TO_AGNO(mp, target);
        args->agbno = XFS_FSB_TO_AGBNO(mp, target);
+
+       trace_xfs_alloc_vextent_near_bno(args);
+
        error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
        if (error) {
                if (error == -ENOSPC)
diff --git a/fs/xfs/xfs_dahash_test.c b/fs/xfs/xfs_dahash_test.c
new file mode 100644 (file)
index 0000000..230651a
--- /dev/null
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Oracle.  All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dahash_test.h"
+
+/* 4096 random bytes */
+static uint8_t __initdata __attribute__((__aligned__(8))) test_buf[] =
+{
+       0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
+       0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
+       0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
+       0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
+       0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
+       0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
+       0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
+       0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
+       0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
+       0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
+       0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
+       0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
+       0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
+       0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
+       0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
+       0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
+       0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
+       0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
+       0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
+       0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
+       0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
+       0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
+       0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
+       0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
+       0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
+       0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
+       0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
+       0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
+       0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
+       0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
+       0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
+       0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
+       0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
+       0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
+       0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
+       0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
+       0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
+       0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
+       0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
+       0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
+       0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
+       0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
+       0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
+       0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
+       0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
+       0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
+       0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
+       0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
+       0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
+       0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
+       0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
+       0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
+       0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
+       0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
+       0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
+       0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
+       0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
+       0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
+       0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
+       0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
+       0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
+       0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
+       0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
+       0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
+       0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
+       0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
+       0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
+       0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
+       0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
+       0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
+       0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
+       0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
+       0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
+       0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
+       0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
+       0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
+       0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
+       0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
+       0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
+       0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
+       0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
+       0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
+       0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
+       0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
+       0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
+       0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
+       0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
+       0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
+       0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
+       0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
+       0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
+       0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
+       0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
+       0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
+       0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
+       0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
+       0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
+       0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
+       0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
+       0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
+       0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
+       0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
+       0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
+       0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
+       0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
+       0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
+       0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
+       0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
+       0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
+       0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
+       0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
+       0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
+       0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
+       0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
+       0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
+       0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
+       0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
+       0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
+       0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
+       0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
+       0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
+       0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
+       0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
+       0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
+       0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
+       0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
+       0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
+       0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
+       0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
+       0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
+       0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
+       0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
+       0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
+       0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
+       0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
+       0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
+       0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
+       0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
+       0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
+       0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
+       0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
+       0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
+       0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
+       0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
+       0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
+       0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
+       0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
+       0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
+       0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
+       0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
+       0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
+       0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
+       0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
+       0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
+       0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
+       0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
+       0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
+       0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
+       0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
+       0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
+       0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
+       0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
+       0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
+       0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
+       0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
+       0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
+       0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
+       0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
+       0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
+       0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
+       0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
+       0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
+       0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
+       0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
+       0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
+       0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
+       0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
+       0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
+       0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
+       0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
+       0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
+       0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
+       0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
+       0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
+       0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
+       0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
+       0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
+       0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
+       0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
+       0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
+       0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
+       0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
+       0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
+       0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
+       0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
+       0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
+       0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
+       0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
+       0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
+       0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
+       0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
+       0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
+       0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
+       0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
+       0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
+       0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
+       0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
+       0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
+       0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
+       0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
+       0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
+       0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
+       0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
+       0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
+       0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
+       0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
+       0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
+       0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
+       0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
+       0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
+       0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
+       0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
+       0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
+       0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
+       0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
+       0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
+       0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
+       0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
+       0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
+       0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
+       0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
+       0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
+       0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
+       0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
+       0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
+       0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
+       0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
+       0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
+       0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
+       0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
+       0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
+       0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
+       0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
+       0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
+       0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
+       0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
+       0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
+       0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
+       0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
+       0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
+       0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
+       0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
+       0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
+       0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
+       0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
+       0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
+       0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
+       0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
+       0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
+       0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
+       0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
+       0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
+       0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
+       0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
+       0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
+       0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
+       0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
+       0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
+       0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
+       0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
+       0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
+       0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
+       0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
+       0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
+       0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
+       0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
+       0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
+       0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
+       0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
+       0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
+       0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
+       0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
+       0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
+       0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
+       0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
+       0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
+       0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
+       0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
+       0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
+       0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
+       0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
+       0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
+       0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
+       0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
+       0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
+       0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
+       0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
+       0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
+       0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
+       0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
+       0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
+       0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
+       0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
+       0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
+       0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
+       0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
+       0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
+       0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
+       0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
+       0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
+       0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
+       0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
+       0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
+       0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
+       0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
+       0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
+       0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
+       0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
+       0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
+       0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
+       0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
+       0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
+       0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
+       0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
+       0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
+       0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
+       0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
+       0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
+       0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
+       0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
+       0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
+       0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
+       0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
+       0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
+       0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
+       0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
+       0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
+       0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
+       0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
+       0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
+       0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
+       0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
+       0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
+       0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
+       0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
+       0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
+       0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
+       0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
+       0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
+       0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
+       0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
+       0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
+       0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
+       0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
+       0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
+       0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
+       0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
+       0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
+       0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
+       0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
+       0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
+       0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
+       0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
+       0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
+       0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
+       0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
+       0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
+       0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
+       0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
+       0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
+       0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
+       0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
+       0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
+       0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
+       0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
+       0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
+       0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
+       0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
+       0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
+       0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
+       0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
+       0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
+       0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
+       0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
+       0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
+       0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
+       0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
+       0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
+       0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
+       0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
+       0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
+       0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
+       0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
+       0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
+       0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
+       0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
+       0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
+       0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
+       0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
+       0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
+       0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
+       0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
+       0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
+       0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
+       0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
+       0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
+       0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
+       0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
+       0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
+       0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
+       0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
+       0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
+       0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
+       0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
+       0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
+       0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
+       0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
+       0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
+       0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
+       0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
+       0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
+       0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
+       0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
+       0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
+       0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
+       0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
+       0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
+       0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
+       0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
+       0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
+       0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
+       0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
+       0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
+       0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
+       0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
+       0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
+       0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
+       0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
+       0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
+       0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
+       0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
+       0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
+       0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
+       0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
+       0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
+       0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
+       0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
+       0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
+       0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
+       0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
+       0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
+       0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
+       0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
+       0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
+       0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
+       0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
+       0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
+       0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
+       0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
+       0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
+       0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
+       0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
+       0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
+       0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
+       0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
+       0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
+       0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
+       0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
+       0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
+       0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
+       0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
+       0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
+       0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
+       0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
+       0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
+       0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
+       0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
+       0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
+       0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
+       0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
+       0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
+       0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
+       0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
+       0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
+       0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
+       0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
+       0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
+       0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
+       0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
+       0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
+       0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
+       0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
+       0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
+       0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
+       0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
+       0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
+       0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
+       0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
+       0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
+       0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
+       0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
+       0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
+       0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
+       0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
+       0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
+       0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
+       0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
+       0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
+       0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
+       0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
+       0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
+       0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
+};
+
+/* 100 test cases */
+static struct dahash_test {
+       uint16_t        start;  /* random 12 bit offset in buf */
+       uint16_t        length; /* random 8 bit length of test */
+       xfs_dahash_t    dahash; /* expected dahash result */
+} test[] __initdata =
+{
+       {0x0567, 0x0097, 0x96951389},
+       {0x0869, 0x0055, 0x6455ab4f},
+       {0x0c51, 0x00be, 0x8663afde},
+       {0x044a, 0x00fc, 0x98fbe432},
+       {0x0f29, 0x0079, 0x42371997},
+       {0x08ba, 0x0052, 0x942be4f7},
+       {0x01f2, 0x0013, 0x5262687e},
+       {0x09e3, 0x00e2, 0x8ffb0908},
+       {0x007c, 0x0051, 0xb3158491},
+       {0x0854, 0x001f, 0x83bb20d9},
+       {0x031b, 0x0008, 0x98970bdf},
+       {0x0de7, 0x0027, 0xbfbf6f6c},
+       {0x0f76, 0x0005, 0x906a7105},
+       {0x092e, 0x00d0, 0x86631850},
+       {0x0233, 0x0082, 0xdbdd914e},
+       {0x04c9, 0x0075, 0x5a400a9e},
+       {0x0b66, 0x0099, 0xae128b45},
+       {0x000d, 0x00ed, 0xe61c216a},
+       {0x0a31, 0x003d, 0xf69663b9},
+       {0x00a3, 0x0052, 0x643c39ae},
+       {0x0125, 0x00d5, 0x7c310b0d},
+       {0x0105, 0x004a, 0x06a77e74},
+       {0x0858, 0x008e, 0x265bc739},
+       {0x045e, 0x0095, 0x13d6b192},
+       {0x0dab, 0x003c, 0xc4498704},
+       {0x00cd, 0x00b5, 0x802a4e2d},
+       {0x069b, 0x008c, 0x5df60f71},
+       {0x0454, 0x006c, 0x5f03d8bb},
+       {0x040e, 0x0032, 0x0ce513b5},
+       {0x0874, 0x00e2, 0x6a811fb3},
+       {0x0521, 0x00b4, 0x93296833},
+       {0x0ddc, 0x00cf, 0xf9305338},
+       {0x0a70, 0x0023, 0x239549ea},
+       {0x083e, 0x0027, 0x2d88ba97},
+       {0x0241, 0x00a7, 0xfe0b32e1},
+       {0x0dfc, 0x0096, 0x1a11e815},
+       {0x023e, 0x001e, 0xebc9a1f3},
+       {0x067e, 0x0066, 0xb1067f81},
+       {0x09ea, 0x000e, 0x46fd7247},
+       {0x036b, 0x008c, 0x1a39acdf},
+       {0x078f, 0x0030, 0x964042ab},
+       {0x085c, 0x008f, 0x1829edab},
+       {0x02ec, 0x009f, 0x6aefa72d},
+       {0x043b, 0x00ce, 0x65642ff5},
+       {0x0a32, 0x00b8, 0xbd82759e},
+       {0x0d3c, 0x0087, 0xf4d66d54},
+       {0x09ec, 0x008a, 0x06bfa1ff},
+       {0x0902, 0x0015, 0x755025d2},
+       {0x08fe, 0x000e, 0xf690ce2d},
+       {0x00fb, 0x00dc, 0xe55f1528},
+       {0x0eaa, 0x003a, 0x0fe0a8d7},
+       {0x05fb, 0x0006, 0x86281cfb},
+       {0x0dd1, 0x00a7, 0x60ab51b4},
+       {0x0005, 0x001b, 0xf51d969b},
+       {0x077c, 0x00dd, 0xc2fed268},
+       {0x0575, 0x00f5, 0x432c0b1a},
+       {0x05be, 0x0088, 0x78baa04b},
+       {0x0c89, 0x0068, 0xeda9e428},
+       {0x0f5c, 0x0068, 0xec143c76},
+       {0x06a8, 0x0009, 0xd72651ce},
+       {0x060f, 0x008e, 0x765426cd},
+       {0x07b1, 0x0047, 0x2cfcfa0c},
+       {0x04f1, 0x0041, 0x55b172f9},
+       {0x0e05, 0x00ac, 0x61efde93},
+       {0x0bf7, 0x0097, 0x05b83eee},
+       {0x04e9, 0x00f3, 0x9928223a},
+       {0x023a, 0x0005, 0xdfada9bc},
+       {0x0acb, 0x000e, 0x2217cecd},
+       {0x0148, 0x0060, 0xbc3f7405},
+       {0x0764, 0x0059, 0xcbc201b1},
+       {0x021f, 0x0059, 0x5d6b2256},
+       {0x0f1e, 0x006c, 0xdefeeb45},
+       {0x071c, 0x00b9, 0xb9b59309},
+       {0x0564, 0x0063, 0xae064271},
+       {0x0b14, 0x0044, 0xdb867d9b},
+       {0x0e5a, 0x0055, 0xff06b685},
+       {0x015e, 0x00ba, 0x1115ccbc},
+       {0x0379, 0x00e6, 0x5f4e58dd},
+       {0x013b, 0x0067, 0x4897427e},
+       {0x0e64, 0x0071, 0x7af2b7a4},
+       {0x0a11, 0x0050, 0x92105726},
+       {0x0109, 0x0055, 0xd0d000f9},
+       {0x00aa, 0x0022, 0x815d229d},
+       {0x09ac, 0x004f, 0x02f9d985},
+       {0x0e1b, 0x00ce, 0x5cf92ab4},
+       {0x08af, 0x00d8, 0x17ca72d1},
+       {0x0e33, 0x000a, 0xda2dba6b},
+       {0x0ee3, 0x006a, 0xb00048e5},
+       {0x0648, 0x001a, 0x2364b8cb},
+       {0x0315, 0x0085, 0x0596fd0d},
+       {0x0fbb, 0x003e, 0x298230ca},
+       {0x0422, 0x006a, 0x78ada4ab},
+       {0x04ba, 0x0073, 0xced1fbc2},
+       {0x007d, 0x0061, 0x4b7ff236},
+       {0x070b, 0x00d0, 0x261cf0ae},
+       {0x0c1a, 0x0035, 0x8be92ee2},
+       {0x0af8, 0x0063, 0x824dcf03},
+       {0x08f8, 0x006d, 0xd289710c},
+       {0x021b, 0x00ee, 0x6ac1c41d},
+       {0x05b5, 0x00da, 0x8e52f0e2},
+};
+
+int __init
+xfs_dahash_test(void)
+{
+       unsigned int    i;
+       unsigned int    errors = 0;
+
+       for (i = 0; i < ARRAY_SIZE(test); i++) {
+               xfs_dahash_t    hash;
+
+               hash = xfs_da_hashname(test_buf + test[i].start,
+                               test[i].length);
+               if (hash != test[i].dahash)
+                       errors++;
+       }
+
+       if (errors) {
+               printk(KERN_ERR "xfs dir/attr hash test failed %u times!",
+                               errors);
+               return -ERANGE;
+       }
+
+       return 0;
+}
diff --git a/fs/xfs/xfs_dahash_test.h b/fs/xfs/xfs_dahash_test.h
new file mode 100644 (file)
index 0000000..1a05bf4
--- /dev/null
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Oracle.  All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __XFS_DAHASH_TEST_H__
+#define __XFS_DAHASH_TEST_H__
+
+int xfs_dahash_test(void);
+
+#endif /* __XFS_DAHASH_TEST_H__ */
+
index 69dbe78141280e7f82405129da19f0549a2d6ceb..285885c308bd7db943ca58357305a52a70cdcb6f 100644 (file)
@@ -1090,9 +1090,12 @@ xfs_buffered_write_iomap_begin(
                 */
                if (xfs_has_allocsize(mp))
                        prealloc_blocks = mp->m_allocsize_blocks;
-               else
+               else if (allocfork == XFS_DATA_FORK)
                        prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
                                                offset, count, &icur);
+               else
+                       prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
+                                               offset, count, &ccur);
                if (prealloc_blocks) {
                        xfs_extlen_t    align;
                        xfs_off_t       end_offset;
index 2479b5cbd75ecf51850aa0a1c6dfb79b1def8c2b..4f814f9e12ab50a74d68612ce22c8562b86c3756 100644 (file)
@@ -41,6 +41,7 @@
 #include "xfs_attr_item.h"
 #include "xfs_xattr.h"
 #include "xfs_iunlink_item.h"
+#include "xfs_dahash_test.h"
 
 #include <linux/magic.h>
 #include <linux/fs_context.h>
@@ -2286,6 +2287,10 @@ init_xfs_fs(void)
 
        xfs_check_ondisk_structs();
 
+       error = xfs_dahash_test();
+       if (error)
+               return error;
+
        printk(KERN_INFO XFS_VERSION_STRING " with "
                         XFS_BUILD_OPTIONS " enabled\n");
 
index 7dc0fd6a6504743389d67c571d97c12086e7b497..9c0006c55fec3c869d2484106eba463c581a6fcd 100644 (file)
@@ -1883,6 +1883,13 @@ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
 DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
 DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
 
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_this_ag);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_start_ag);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_first_ag);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_exact_bno);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_near_bno);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_finish);
+
 TRACE_EVENT(xfs_alloc_cur_check,
        TP_PROTO(struct xfs_mount *mp, xfs_btnum_t btnum, xfs_agblock_t bno,
                 xfs_extlen_t len, xfs_extlen_t diff, bool new),
index 738b0e28d74b521259ae3e3eafececafbb061c93..617e4f9db42eab769674230b12dc85595f12322b 100644 (file)
@@ -383,7 +383,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
        struct block_device *bdev = inode->i_sb->s_bdev;
        unsigned int max = bdev_max_zone_append_sectors(bdev);
        struct bio *bio;
-       ssize_t size;
+       ssize_t size = 0;
        int nr_pages;
        ssize_t ret;
 
@@ -426,7 +426,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
                if (bio->bi_iter.bi_sector != wpsector) {
                        zonefs_warn(inode->i_sb,
                                "Corrupted write pointer %llu for zone at %llu\n",
-                               wpsector, z->z_sector);
+                               bio->bi_iter.bi_sector, z->z_sector);
                        ret = -EIO;
                }
        }
index 0584e9f6e3397917d361e99f0ba1310f73414fb8..57acb895c03812fb47c44d11fbacef9115b7dafa 100644 (file)
@@ -657,6 +657,7 @@ static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
 #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
 bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
 int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
+bool acpi_quirk_skip_gpio_event_handlers(void);
 #else
 static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
 {
@@ -668,6 +669,10 @@ acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
        *skip = false;
        return 0;
 }
+static inline bool acpi_quirk_skip_gpio_event_handlers(void)
+{
+       return false;
+}
 #endif
 
 #ifdef CONFIG_PM
index 057c8964aefba144d48348a84d11ece09775a130..cbbc9a6dc571587db49b5e9b965f9e3610b83c55 100644 (file)
@@ -21,6 +21,7 @@
 #define CNTHCTL_EVNTEN                 (1 << 2)
 #define CNTHCTL_EVNTDIR                        (1 << 3)
 #define CNTHCTL_EVNTI                  (0xF << 4)
+#define CNTHCTL_ECV                    (1 << 12)
 
 enum arch_timer_reg {
        ARCH_TIMER_REG_CTRL,
index 42f86327b40a790f71d5d9d92369ce9f053867df..bf964cdfb3300d98365be6ae487f8d69d2d71f47 100644 (file)
@@ -423,11 +423,11 @@ struct drm_bridge_funcs {
         *
         * The returned array must be allocated with kmalloc() and will be
         * freed by the caller. If the allocation fails, NULL should be
-        * returned. num_output_fmts must be set to the returned array size.
+        * returned. num_input_fmts must be set to the returned array size.
         * Formats listed in the returned array should be listed in decreasing
         * preference order (the core will try all formats until it finds one
         * that works). When the format is not supported NULL should be
-        * returned and num_output_fmts should be set to 0.
+        * returned and num_input_fmts should be set to 0.
         *
         * This method is called on all elements of the bridge chain as part of
         * the bus format negotiation process that happens in
index 772a4adf52870a1fba07d0976b7b447c3de94abe..f1f00fc2dba614b6fa3de952030be476f1d85b8c 100644 (file)
@@ -476,7 +476,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
 void drm_gem_lru_remove(struct drm_gem_object *obj);
 void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+                              unsigned int nr_to_scan,
+                              unsigned long *remaining,
                               bool (*shrink)(struct drm_gem_object *obj));
 
 #endif /* __DRM_GEM_H__ */
index c52a6e6839da9c8df41d50fa295f9ccc0871f14c..bb3cb005873ec9828b1ece587755cb8a9fc9b5ed 100644 (file)
@@ -13,6 +13,9 @@
 enum kvm_arch_timers {
        TIMER_PTIMER,
        TIMER_VTIMER,
+       NR_KVM_EL0_TIMERS,
+       TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
+       TIMER_HPTIMER,
        NR_KVM_TIMERS
 };
 
@@ -21,6 +24,7 @@ enum kvm_arch_timer_regs {
        TIMER_REG_CVAL,
        TIMER_REG_TVAL,
        TIMER_REG_CTL,
+       TIMER_REG_VOFF,
 };
 
 struct arch_timer_offset {
@@ -29,21 +33,29 @@ struct arch_timer_offset {
         * structure. If NULL, assume a zero offset.
         */
        u64     *vm_offset;
+       /*
+        * If set, pointer to one of the offsets in the vcpu's sysreg
+        * array. If NULL, assume a zero offset.
+        */
+       u64     *vcpu_offset;
 };
 
 struct arch_timer_vm_data {
        /* Offset applied to the virtual timer/counter */
        u64     voffset;
+       /* Offset applied to the physical timer/counter */
+       u64     poffset;
+
+       /* The PPI for each timer, global to the VM */
+       u8      ppi[NR_KVM_TIMERS];
 };
 
 struct arch_timer_context {
        struct kvm_vcpu                 *vcpu;
 
-       /* Timer IRQ */
-       struct kvm_irq_level            irq;
-
        /* Emulated Timer (may be unused) */
        struct hrtimer                  hrtimer;
+       u64                             ns_frac;
 
        /* Offset for this counter/timer */
        struct arch_timer_offset        offset;
@@ -54,14 +66,19 @@ struct arch_timer_context {
         */
        bool                            loaded;
 
+       /* Output level of the timer IRQ */
+       struct {
+               bool                    level;
+       } irq;
+
        /* Duplicated state from arch_timer.c for convenience */
        u32                             host_timer_irq;
-       u32                             host_timer_irq_flags;
 };
 
 struct timer_map {
        struct arch_timer_context *direct_vtimer;
        struct arch_timer_context *direct_ptimer;
+       struct arch_timer_context *emul_vtimer;
        struct arch_timer_context *emul_ptimer;
 };
 
@@ -84,6 +101,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
 void kvm_timer_update_run(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
 
+void kvm_timer_init_vm(struct kvm *kvm);
+
 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
 int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
 
@@ -98,15 +117,18 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
 void kvm_timer_init_vhe(void);
 
-bool kvm_arch_timer_get_input_level(int vintid);
-
 #define vcpu_timer(v)  (&(v)->arch.timer_cpu)
 #define vcpu_get_timer(v,t)    (&vcpu_timer(v)->timers[(t)])
 #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
 #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
+#define vcpu_hvtimer(v)        (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
+#define vcpu_hptimer(v)        (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
 
 #define arch_timer_ctx_index(ctx)      ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
 
+#define timer_vm_data(ctx)             (&(ctx)->vcpu->kvm->arch.timer_data)
+#define timer_irq(ctx)                 (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
+
 u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
                              enum kvm_arch_timers tmr,
                              enum kvm_arch_timer_regs treg);
index 1188f116cf4e9dafbde939eac6d17cc5235d9650..2df152207ccd00bc8abb53f6b0827cfc4ee96e5c 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <asm/kvm_emulate.h>
 
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu);
 
 static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
 {
@@ -43,9 +43,13 @@ static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
 struct kvm_one_reg;
 
 void kvm_arm_init_hypercalls(struct kvm *kvm);
+void kvm_arm_teardown_hypercalls(struct kvm *kvm);
 int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+
 #endif
index d3ad51fde9db0eb0e0791427dcd629b9d883bb98..402b545959af7b2e4e7aa75b5f4271896d2d78d9 100644 (file)
@@ -380,6 +380,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
                          u32 vintid, struct irq_ops *ops);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
index 0a24ab7cb66fa2eab5650f815d9a8370e9fb8cab..8e2eefa9fbc0f0747b756a2097cb882a95362873 100644 (file)
@@ -9,7 +9,14 @@
 #include <linux/phy.h>
 
 #if IS_ENABLED(CONFIG_ACPI_MDIO)
-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode);
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+                           struct module *owner);
+
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
+{
+       return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
+}
 #else /* CONFIG_ACPI_MDIO */
 static inline int
 acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
index dd5ce1137f04aeed0ce86b732b14d34c36710716..de0b0c3e7395a60f2316a99536fc58927eb36427 100644 (file)
@@ -228,6 +228,12 @@ static inline unsigned short req_get_ioprio(struct request *req)
        *(listptr) = rq;                                \
 } while (0)
 
+#define rq_list_add_tail(lastpptr, rq) do {            \
+       (rq)->rq_next = NULL;                           \
+       **(lastpptr) = rq;                              \
+       *(lastpptr) = &rq->rq_next;                     \
+} while (0)
+
 #define rq_list_pop(listptr)                           \
 ({                                                     \
        struct request *__req = NULL;                   \
index d1aee08f8c1811d645856ce4b92e24cb4cd236ab..941304f17492f05708d925bfd347755b2e5f272e 100644 (file)
@@ -1446,11 +1446,10 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
                wake_up_process(waiter);
 }
 
-unsigned long bdev_start_io_acct(struct block_device *bdev,
-                                unsigned int sectors, enum req_op op,
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
                                 unsigned long start_time);
 void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
-               unsigned long start_time);
+                     unsigned int sectors, unsigned long start_time);
 
 unsigned long bio_start_io_acct(struct bio *bio);
 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
index 842e72a5348fa3f571a1f1669af3c6c617ffe905..6f3175f0678a8d78153d96157eef9edc9bfce36e 100644 (file)
@@ -1363,7 +1363,13 @@ struct clk_hw_onecell_data {
        struct clk_hw *hws[];
 };
 
-#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+#define CLK_OF_DECLARE(name, compat, fn) \
+       static void __init __##name##_of_clk_init_declare(struct device_node *np) \
+       {                                                               \
+               fn(np);                                                 \
+               fwnode_dev_initialized(of_fwnode_handle(np), true);     \
+       }                                                               \
+       OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare)
 
 /*
  * Use this macro when you have a driver that requires two initialization
index d4afa8508a806bc500c3a058799722eb9c74e749..3a7909ed54980d7a373335874c43135f57c3f384 100644 (file)
@@ -96,6 +96,7 @@ static inline void user_exit_irqoff(void) { }
 static inline int exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline int ct_state(void) { return -1; }
+static inline int __ct_state(void) { return -1; }
 static __always_inline bool context_tracking_guest_enter(void) { return false; }
 static inline void context_tracking_guest_exit(void) { }
 #define CT_WARN_ON(cond) do { } while (0)
index 4a4d56f771802bf97cfcf16eee960ca948a29974..fdd537ea513ffa35562cc636900e0a0a5b7faaa4 100644 (file)
@@ -46,7 +46,9 @@ struct context_tracking {
 
 #ifdef CONFIG_CONTEXT_TRACKING
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif
 
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 static __always_inline int __ct_state(void)
 {
        return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
index d4901ca8883c33ea27b14d51dcd1d44cda53156d..ca736b05ec7b056ebafae5f347913ce1fec97d2d 100644 (file)
@@ -350,6 +350,23 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
 #define for_each_cpu_andnot(cpu, mask1, mask2)                         \
        for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
 
+/**
+ * for_each_cpu_or - iterate over every cpu present in either mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places.  It is equivalent to:
+ *     struct cpumask tmp;
+ *     cpumask_or(&tmp, &mask1, &mask2);
+ *     for_each_cpu(cpu, &tmp)
+ *             ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_or(cpu, mask1, mask2)                             \
+       for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
 /**
  * cpumask_any_but - return a "random" in a cpumask, but not this one.
  * @mask: the cpumask to search
index 04a733f0ba9562115470787468c3fc10b605a9d4..7aa62c92185f6467069fe07aedb8cf4e0041d352 100644 (file)
@@ -693,6 +693,7 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
 }
 
 extern void efi_init (void);
+extern void efi_earlycon_reprobe(void);
 #ifdef CONFIG_EFI
 extern void efi_enter_virtual_mode (void);     /* switch EFI to virtual mode, if possible */
 #else
index d8d20514ea052259c466b311fc058b5faa7cb9c6..02d09cb57f6c92ec13906352e831118f06dbb1ae 100644 (file)
@@ -212,6 +212,7 @@ struct fb_deferred_io {
        /* delay between mkwrite and deferred handler */
        unsigned long delay;
        bool sort_pagereflist; /* sort pagelist by offset */
+       int open_count; /* number of opened files; protected by fb_info lock */
        struct mutex lock; /* mutex that protects the pageref list */
        struct list_head pagereflist; /* list of pagerefs for touched pages */
        /* callback */
index 4647864a5ffdbb972fb1a1a21eb79178755b72d5..5e4f39ef2e72cc00b5924f048d60394e6e579313 100644 (file)
@@ -14,6 +14,8 @@ unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long
                                        unsigned long nbits, unsigned long start);
 unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
                                        unsigned long nbits, unsigned long start);
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+                                       unsigned long nbits, unsigned long start);
 unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
                                         unsigned long start);
 extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
@@ -127,6 +129,36 @@ unsigned long find_next_andnot_bit(const unsigned long *addr1,
 }
 #endif
 
+#ifndef find_next_or_bit
+/**
+ * find_next_or_bit - find the next set bit in either memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_or_bit(const unsigned long *addr1,
+               const unsigned long *addr2, unsigned long size,
+               unsigned long offset)
+{
+       if (small_const_nbits(size)) {
+               unsigned long val;
+
+               if (unlikely(offset >= size))
+                       return size;
+
+               val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
+               return val ? __ffs(val) : size;
+       }
+
+       return _find_next_or_bit(addr1, addr2, size, offset);
+}
+#endif
+
 #ifndef find_next_zero_bit
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
@@ -536,6 +568,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
             (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
             (bit)++)
 
+#define for_each_or_bit(bit, addr1, addr2, size) \
+       for ((bit) = 0;                                                                 \
+            (bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+            (bit)++)
+
 /* same as for_each_set_bit() but use bit as value to start with */
 #define for_each_set_bit_from(bit, addr, size) \
        for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
index cd5c5a27557f5605b78eed7531b58124b114fbd6..d12cd18aab3f49448a49f46c0e56689329c234e3 100644 (file)
@@ -122,6 +122,9 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
 void icc_node_add(struct icc_node *node, struct icc_provider *provider);
 void icc_node_del(struct icc_node *node);
 int icc_nodes_remove(struct icc_provider *provider);
+void icc_provider_init(struct icc_provider *provider);
+int icc_provider_register(struct icc_provider *provider);
+void icc_provider_deregister(struct icc_provider *provider);
 int icc_provider_add(struct icc_provider *provider);
 void icc_provider_del(struct icc_provider *provider);
 struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
@@ -167,6 +170,15 @@ static inline int icc_nodes_remove(struct icc_provider *provider)
        return -ENOTSUPP;
 }
 
+static inline void icc_provider_init(struct icc_provider *provider) { }
+
+static inline int icc_provider_register(struct icc_provider *provider)
+{
+       return -ENOTSUPP;
+}
+
+static inline void icc_provider_deregister(struct icc_provider *provider) { }
+
 static inline int icc_provider_add(struct icc_provider *provider)
 {
        return -ENOTSUPP;
index 934e5dd4ccc08d14f8d8731a41fd73c3c73d4d96..35b9328ca3352eeca32b89914ee169421f965262 100644 (file)
@@ -27,7 +27,7 @@ struct io_uring_cmd {
        const void      *cmd;
        union {
                /* callback to defer completions to task context */
-               void (*task_work_cb)(struct io_uring_cmd *cmd);
+               void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
                /* used for polled completion */
                void *cookie;
        };
@@ -39,9 +39,10 @@ struct io_uring_cmd {
 #if defined(CONFIG_IO_URING)
 int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
                              struct iov_iter *iter, void *ioucmd);
-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+                       unsigned issue_flags);
 void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
-                       void (*task_work_cb)(struct io_uring_cmd *));
+                       void (*task_work_cb)(struct io_uring_cmd *, unsigned));
 struct sock *io_uring_get_socket(struct file *file);
 void __io_uring_cancel(bool cancel_all);
 void __io_uring_free(struct task_struct *tsk);
@@ -72,11 +73,11 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
        return -EOPNOTSUPP;
 }
 static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
-               ssize_t ret2)
+               ssize_t ret2, unsigned issue_flags)
 {
 }
 static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
-                       void (*task_work_cb)(struct io_uring_cmd *))
+                       void (*task_work_cb)(struct io_uring_cmd *, unsigned))
 {
 }
 static inline struct sock *io_uring_get_socket(struct file *file)
index 90edc16d37e5982fbd88b818770fb7d647e40ebc..9696c2fb30e95016f6efeea06f4feb77397a2215 100644 (file)
@@ -58,7 +58,7 @@
 
 /*
  * Bit 63 of the memslot generation number is an "update in-progress flag",
- * e.g. is temporarily set for the duration of install_new_memslots().
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
  * This flag effectively creates a unique generation number that is used to
  * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
  * i.e. may (or may not) have come from the previous memslots generation.
@@ -713,7 +713,7 @@ struct kvm {
         * use by the VM. To be used under the slots_lock (above) or in a
         * kvm->srcu critical section where acquiring the slots_lock would
         * lead to deadlock with the synchronize_srcu in
-        * install_new_memslots.
+        * kvm_swap_active_memslots().
         */
        struct mutex slots_arch_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
index 2728d49bbdf6d34cf6cf94dc9adebf2480ddbe22..6f4737d5046a41181401c1be7a663f0d7eb76a95 100644 (file)
@@ -91,11 +91,11 @@ struct gfn_to_pfn_cache {
  * is topped up (__kvm_mmu_topup_memory_cache()).
  */
 struct kvm_mmu_memory_cache {
-       int nobjs;
        gfp_t gfp_zero;
        gfp_t gfp_custom;
        struct kmem_cache *kmem_cache;
        int capacity;
+       int nobjs;
        void **objects;
 };
 #endif
index 9a6b55da8fd6442c895267128a02ee02682971ed..72831e35dca32d7412d2705fd38b00ac029d5927 100644 (file)
@@ -22,6 +22,7 @@
 #define        nlm4_fbig               cpu_to_be32(NLM_FBIG)
 #define        nlm4_failed             cpu_to_be32(NLM_FAILED)
 
+void   nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
 bool   nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
 bool   nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
 bool   nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
index 6a14b7b117668ca2c8b1f914ecb49fb4311784ab..470085b121d3c241e8045416e9d0426eb8a43850 100644 (file)
@@ -297,9 +297,11 @@ struct hh_cache {
  * relationship HH alignment <= LL alignment.
  */
 #define LL_RESERVED_SPACE(dev) \
-       ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+       ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
+         & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
-       ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+       ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
+         & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 
 struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
index 75470159a194d97fa91e208999217767dd3c9761..57ebe1267f7fbe96166e9c40621192330d6fcbc4 100644 (file)
@@ -115,8 +115,9 @@ struct nvme_tcp_icresp_pdu {
 struct nvme_tcp_term_pdu {
        struct nvme_tcp_hdr     hdr;
        __le16                  fes;
-       __le32                  fei;
-       __u8                    rsvd[8];
+       __le16                  feil;
+       __le16                  feiu;
+       __u8                    rsvd[10];
 };
 
 /**
index 4fad4aa245fb0621ee3adbec1793981ae54f1ea7..779507ac750b803db2c8a185d461e6114969b5b3 100644 (file)
@@ -812,6 +812,7 @@ enum nvme_opcode {
                nvme_opcode_name(nvme_cmd_compare),             \
                nvme_opcode_name(nvme_cmd_write_zeroes),        \
                nvme_opcode_name(nvme_cmd_dsm),                 \
+               nvme_opcode_name(nvme_cmd_verify),              \
                nvme_opcode_name(nvme_cmd_resv_register),       \
                nvme_opcode_name(nvme_cmd_resv_report),         \
                nvme_opcode_name(nvme_cmd_resv_acquire),        \
@@ -1144,10 +1145,14 @@ enum nvme_admin_opcode {
                nvme_admin_opcode_name(nvme_admin_ns_mgmt),             \
                nvme_admin_opcode_name(nvme_admin_activate_fw),         \
                nvme_admin_opcode_name(nvme_admin_download_fw),         \
+               nvme_admin_opcode_name(nvme_admin_dev_self_test),       \
                nvme_admin_opcode_name(nvme_admin_ns_attach),           \
                nvme_admin_opcode_name(nvme_admin_keep_alive),          \
                nvme_admin_opcode_name(nvme_admin_directive_send),      \
                nvme_admin_opcode_name(nvme_admin_directive_recv),      \
+               nvme_admin_opcode_name(nvme_admin_virtual_mgmt),        \
+               nvme_admin_opcode_name(nvme_admin_nvme_mi_send),        \
+               nvme_admin_opcode_name(nvme_admin_nvme_mi_recv),        \
                nvme_admin_opcode_name(nvme_admin_dbbuf),               \
                nvme_admin_opcode_name(nvme_admin_format_nvm),          \
                nvme_admin_opcode_name(nvme_admin_security_send),       \
index da633d34ab866bd92a693459a2a23b69de6940fb..8a52ef2e6fa6bb495313edad7026df3f1dedcb84 100644 (file)
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
 bool of_mdiobus_child_is_phy(struct device_node *child);
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
-                            struct device_node *np);
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+                         struct module *owner);
+
+static inline int of_mdiobus_register(struct mii_bus *mdio,
+                                     struct device_node *np)
+{
+       return __of_mdiobus_register(mdio, np, THIS_MODULE);
+}
+
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+                              struct device_node *np, struct module *owner);
+
+static inline int devm_of_mdiobus_register(struct device *dev,
+                                          struct mii_bus *mdio,
+                                          struct device_node *np)
+{
+       return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
+}
+
 struct mdio_device *of_mdio_find_device(struct device_node *np);
 struct phy_device *of_phy_find_device(struct device_node *phy_np);
 struct phy_device *
index fafd8020c6d7f1f7a02b82199874d11b6b0870dd..b50e5c79f7e32fd9fb9c4b118677bda46b387be7 100644 (file)
@@ -1438,6 +1438,7 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
                          unsigned int flags);
 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
 void pci_bus_remove_resources(struct pci_bus *bus);
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
 int devm_request_pci_bus_resources(struct device *dev,
                                   struct list_head *resources);
 
index 521a733e21a920cc04a401788048143ecb64f236..75b73c83bc9d0d1074c9e0f67ad39776b12c5eb1 100644 (file)
@@ -45,7 +45,6 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                              s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-s64 percpu_counter_sum_all(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 void percpu_counter_sync(struct percpu_counter *fbc);
 
@@ -196,11 +195,6 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
        return percpu_counter_read(fbc);
 }
 
-static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-       return percpu_counter_read(fbc);
-}
-
 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 {
        return true;
index a152678b82b714bd53f59764a8772627c8e25a05..a2414c18748370f40c0e8d99c0056e56103a26af 100644 (file)
@@ -215,7 +215,7 @@ struct plat_stmmacenet_data {
        int unicast_filter_entries;
        int tx_fifo_size;
        int rx_fifo_size;
-       u32 addr64;
+       u32 host_dma_width;
        u32 rx_queues_to_use;
        u32 tx_queues_to_use;
        u8 rx_sched_algorithm;
index 8ba8b5be5567511dfecf4386de133af14e359807..c1ef5fc60a3cba54d640a3e77dc419506801f42c 100644 (file)
@@ -70,11 +70,16 @@ static inline void sysfb_disable(void)
 #ifdef CONFIG_EFI
 
 extern struct efifb_dmi_info efifb_dmi_list[];
-void sysfb_apply_efi_quirks(struct platform_device *pd);
+void sysfb_apply_efi_quirks(void);
+void sysfb_set_efifb_fwnode(struct platform_device *pd);
 
 #else /* CONFIG_EFI */
 
-static inline void sysfb_apply_efi_quirks(struct platform_device *pd)
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
 {
 }
 
index 2bb4bf33f4f32df80449108f53aaa7c193a75fb2..13c6aaed18df3e2f63a9da830f448e7575988c73 100644 (file)
@@ -384,6 +384,7 @@ devm_thermal_of_cooling_device_register(struct device *dev,
                                struct device_node *np,
                                char *type, void *devdata,
                                const struct thermal_cooling_device_ops *ops);
+void thermal_cooling_device_update(struct thermal_cooling_device *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
 struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
 int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
index e299f29375bb7f1e55789adca45b45e31bebde84..6811e43c1b5c2afed0bad0a2e7469e8573916eb9 100644 (file)
@@ -242,12 +242,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  *
- * When lockdep is enabled, we make sure to always do the RCU portions of
- * the tracepoint code, regardless of whether tracing is on. However,
- * don't check if the condition is false, due to interaction with idle
- * instrumentation. This lets us find RCU issues triggered with tracepoints
- * even when this tracepoint is off. This code has no purpose other than
- * poking RCU a bit.
+ * When lockdep is enabled, we make sure to always test if RCU is
+ * "watching" regardless if the tracepoint is enabled or not. Tracepoints
+ * require RCU to be active, and it should always warn at the tracepoint
+ * site if it is not watching, as it will need to be active when the
+ * tracepoint is enabled.
  */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto)           \
        extern int __traceiter_##name(data_proto);                      \
@@ -260,9 +259,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                                TP_ARGS(args),                          \
                                TP_CONDITION(cond), 0);                 \
                if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {             \
-                       rcu_read_lock_sched_notrace();                  \
-                       rcu_dereference_sched(__tracepoint_##name.funcs);\
-                       rcu_read_unlock_sched_notrace();                \
+                       WARN_ON_ONCE(!rcu_is_watching());               \
                }                                                       \
        }                                                               \
        __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),          \
index 7254edfba4c9c43fd0ea2f6f44fe1b2d2083d2e4..6ed9b4d546a7ac897dd4ba90d72240182c60f2e1 100644 (file)
@@ -1613,6 +1613,7 @@ void hci_conn_add_sysfs(struct hci_conn *conn);
 void hci_conn_del_sysfs(struct hci_conn *conn);
 
 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
+#define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent)
 
 /* ----- LMP capabilities ----- */
 #define lmp_encrypt_capable(dev)   ((dev)->features[0][0] & LMP_ENCRYPT)
index d517bfac937b0a2bda248f594750fbb0c9a082ab..41c57b8b167147bb4544a9bdcb92e7a61459fc40 100644 (file)
@@ -428,12 +428,18 @@ MAX_XDP_METADATA_KFUNC,
 #ifdef CONFIG_NET
 u32 bpf_xdp_metadata_kfunc_id(int id);
 bool bpf_dev_bound_kfunc_id(u32 btf_id);
+void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
 void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
 void xdp_features_clear_redirect_target(struct net_device *dev);
 #else
 static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
 static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
 
+static inline void
+xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
+{
+}
+
 static inline void
 xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
 {
@@ -445,4 +451,9 @@ xdp_features_clear_redirect_target(struct net_device *dev)
 }
 #endif
 
+static inline void xdp_clear_features_flag(struct net_device *dev)
+{
+       xdp_set_features_flag(dev, 0);
+}
+
 #endif /* __LINUX_NET_XDP_H__ */
index de310f21406c546f7d8d0819e56ef4a9a4ec7ea4..f10a008e5bfa1408b4836465015c9c3ac984cb32 100644 (file)
@@ -145,6 +145,7 @@ struct scsi_device {
        const char * model;             /* ... after scan; point to static string */
        const char * rev;               /* ... "nullnullnullnull" before scan */
 
+#define SCSI_DEFAULT_VPD_LEN   255     /* default SCSI VPD page size (max) */
        struct scsi_vpd __rcu *vpd_pg0;
        struct scsi_vpd __rcu *vpd_pg83;
        struct scsi_vpd __rcu *vpd_pg80;
@@ -215,6 +216,7 @@ struct scsi_device {
                                         * creation time */
        unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
        unsigned silence_suspend:1;     /* Do not print runtime PM related messages */
+       unsigned no_vpd_size:1;         /* No VPD size reported in header */
 
        unsigned int queue_stopped;     /* request queue is quiesced */
        bool offline_already;           /* Device offline message logged */
index 5d14adae21c78903188052187076eeec9644188a..6b548dc2c49654c95aa9e213692e2931395701e6 100644 (file)
@@ -32,7 +32,8 @@
 #define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11))
 /* do not do automatic start on add */
 #define BLIST_NOSTARTONADD     ((__force blist_flags_t)(1ULL << 12))
-#define __BLIST_UNUSED_13      ((__force blist_flags_t)(1ULL << 13))
+/* do not ask for VPD page size first on some broken targets */
+#define BLIST_NO_VPD_SIZE      ((__force blist_flags_t)(1ULL << 13))
 #define __BLIST_UNUSED_14      ((__force blist_flags_t)(1ULL << 14))
 #define __BLIST_UNUSED_15      ((__force blist_flags_t)(1ULL << 15))
 #define __BLIST_UNUSED_16      ((__force blist_flags_t)(1ULL << 16))
@@ -74,8 +75,7 @@
 #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
                               (__force blist_flags_t) \
                               ((__force __u64)__BLIST_LAST_USED - 1ULL)))
-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
-                            __BLIST_UNUSED_14 | \
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_14 | \
                             __BLIST_UNUSED_15 | \
                             __BLIST_UNUSED_16 | \
                             __BLIST_UNUSED_24 | \
index 216de5f0362101d8adf9f9e231f03c086951df0d..f8d61485de16deadad88d6062676d2374f1079e5 100644 (file)
@@ -35,7 +35,7 @@ TRACE_EVENT(vm_unmapped_area,
                __entry->align_offset = info->align_offset;
        ),
 
-       TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n",
+       TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx",
                IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr,
                IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0,
                __entry->total_vm, __entry->flags, __entry->length,
@@ -110,7 +110,7 @@ TRACE_EVENT(exit_mmap,
                       __entry->mt              = &mm->mm_mt;
        ),
 
-       TP_printk("mt_mod %p, DESTROY\n",
+       TP_printk("mt_mod %p, DESTROY",
                  __entry->mt
        )
 );
index 5041c3598493874ce15b554d03e04df4f3c518d3..b5cd3e7b3775a61690ce5998b6fe379de34b9e01 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/fou.yaml */
 /* YNL-GEN uapi header */
index 4003a166328cc38027b4037527da91738944cf6b..737318b1c1d9a16345853c6c561847bf6270e435 100644 (file)
@@ -341,8 +341,13 @@ struct kvm_run {
                        __u64 nr;
                        __u64 args[6];
                        __u64 ret;
-                       __u32 longmode;
-                       __u32 pad;
+
+                       union {
+#ifndef __KERNEL__
+                               __u32 longmode;
+#endif
+                               __u64 flags;
+                       };
                } hypercall;
                /* KVM_EXIT_TPR_ACCESS */
                struct {
@@ -1184,6 +1189,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
 #define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
+#define KVM_CAP_COUNTER_OFFSET 227
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1543,6 +1549,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_PMU_EVENT_FILTER  _IOW(KVMIO,  0xb2, struct kvm_pmu_event_filter)
 #define KVM_PPC_SVM_OFF                  _IO(KVMIO,  0xb3)
 #define KVM_ARM_MTE_COPY_TAGS    _IOR(KVMIO,  0xb4, struct kvm_arm_copy_mte_tags)
+/* Available with KVM_CAP_COUNTER_OFFSET */
+#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO,  0xb5, struct kvm_arm_counter_offset)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
index 8c4e3e536c04285e155617cde126f2c4fce10e08..639524b59930bfcabcd0d352048a3fd4aa1da08b 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN uapi header */
@@ -33,6 +33,8 @@ enum netdev_xdp_act {
        NETDEV_XDP_ACT_HW_OFFLOAD = 16,
        NETDEV_XDP_ACT_RX_SG = 32,
        NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+
+       NETDEV_XDP_ACT_MASK = 127,
 };
 
 enum {
index 25a0af57dd5ed3c204897f8da892a4c378bf5c08..51c13cf9c5aee4a2d1ab33c1a89043383d67b9cf 100644 (file)
@@ -789,6 +789,7 @@ enum {
        TCA_ROOT_FLAGS,
        TCA_ROOT_COUNT,
        TCA_ROOT_TIME_DELTA, /* in msecs */
+       TCA_ROOT_EXT_WARN_MSG,
        __TCA_ROOT_MAX,
 #define        TCA_ROOT_MAX (__TCA_ROOT_MAX - 1)
 };
index 655d92e803e1498d35b9a96e2c2a7c645c6ba726..79a443c65ea93c0f93696aa83711ce64b138615b 100644 (file)
@@ -483,6 +483,8 @@ struct xenpf_symdata {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
 
+#define XENPF_get_dom0_console 64
+
 struct xen_platform_op {
        uint32_t cmd;
        uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -506,6 +508,7 @@ struct xen_platform_op {
                struct xenpf_mem_hotadd        mem_add;
                struct xenpf_core_parking      core_parking;
                struct xenpf_symdata           symdata;
+               struct dom0_vga_console_info   dom0_console;
                uint8_t                        pad[128];
        } u;
 };
index 4425d1783d5c21e8f1da703e503a5edc37a3d93f..bb87b789c54396920870b99167735fb4df8ac0cf 100644 (file)
@@ -156,7 +156,7 @@ static char *extra_init_args;
 
 #ifdef CONFIG_BOOT_CONFIG
 /* Is bootconfig on command line? */
-static bool bootconfig_found = IS_ENABLED(CONFIG_BOOT_CONFIG_FORCE);
+static bool bootconfig_found;
 static size_t initargs_offs;
 #else
 # define bootconfig_found false
@@ -429,7 +429,7 @@ static void __init setup_boot_config(void)
        err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
                         bootconfig_params);
 
-       if (IS_ERR(err) || !bootconfig_found)
+       if (IS_ERR(err) || !(bootconfig_found || IS_ENABLED(CONFIG_BOOT_CONFIG_FORCE)))
                return;
 
        /* parse_args() stops at the next param of '--' and returns an address */
@@ -437,7 +437,11 @@ static void __init setup_boot_config(void)
                initargs_offs = err - tmp_cmdline;
 
        if (!data) {
-               pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+               /* If user intended to use bootconfig, show an error level message */
+               if (bootconfig_found)
+                       pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+               else
+                       pr_info("No bootconfig data provided, so skipping bootconfig");
                return;
        }
 
index 68dfc6936aa7250181a981dba6bb58e1ba6115d3..b80614e7d605112507363644c510d6312b65259d 100644 (file)
@@ -19,6 +19,9 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
        unsigned long nr = ctx->file_alloc_end;
        int ret;
 
+       if (!table->bitmap)
+               return -ENFILE;
+
        do {
                ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
                if (ret != nr)
index 8803c0979e2a91345d5dc1c29da1686a224c241d..85fd7ce5f05b85f10fc8ab91af8b428629356140 100644 (file)
@@ -202,7 +202,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
         * completes with -EOVERFLOW, then the sender must ensure that a
         * later IORING_OP_MSG_RING delivers the message.
         */
-       if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+       if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
                ret = -EOVERFLOW;
 out_unlock:
        io_double_unlock_ctx(target_ctx);
@@ -229,6 +229,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
        struct io_ring_ctx *ctx = req->ctx;
        struct file *src_file = msg->src_file;
 
+       if (msg->len)
+               return -EINVAL;
        if (target_ctx == ctx)
                return -EINVAL;
        if (target_ctx->flags & IORING_SETUP_R_DISABLED)
index b7f190ca528e6e259eb2b072d7a16aaba98848cb..4040cf093318cb2c3f101014f0a299c257f15039 100644 (file)
@@ -47,6 +47,7 @@ struct io_connect {
        struct sockaddr __user          *addr;
        int                             addr_len;
        bool                            in_progress;
+       bool                            seen_econnaborted;
 };
 
 struct io_sr_msg {
@@ -1424,7 +1425,7 @@ int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        conn->addr_len =  READ_ONCE(sqe->addr2);
-       conn->in_progress = false;
+       conn->in_progress = conn->seen_econnaborted = false;
        return 0;
 }
 
@@ -1461,18 +1462,24 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = __sys_connect_file(req->file, &io->address,
                                        connect->addr_len, file_flags);
-       if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
+       if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
+           && force_nonblock) {
                if (ret == -EINPROGRESS) {
                        connect->in_progress = true;
-               } else {
-                       if (req_has_async_data(req))
-                               return -EAGAIN;
-                       if (io_alloc_async_data(req)) {
-                               ret = -ENOMEM;
+                       return -EAGAIN;
+               }
+               if (ret == -ECONNABORTED) {
+                       if (connect->seen_econnaborted)
                                goto out;
-                       }
-                       memcpy(req->async_data, &__io, sizeof(__io));
+                       connect->seen_econnaborted = true;
+               }
+               if (req_has_async_data(req))
+                       return -EAGAIN;
+               if (io_alloc_async_data(req)) {
+                       ret = -ENOMEM;
+                       goto out;
                }
+               memcpy(req->async_data, &__io, sizeof(__io));
                return -EAGAIN;
        }
        if (ret == -ERESTARTSYS)
index 056f40946ff68ff8d2d72781c5bdc9e3e303cea4..7a43aed8e395cb4d22885e0800bf193c862f0b1f 100644 (file)
@@ -410,7 +410,7 @@ __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
                                     unsigned nr, struct io_rsrc_data **pdata)
 {
        struct io_rsrc_data *data;
-       int ret = -ENOMEM;
+       int ret = 0;
        unsigned i;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -794,6 +794,7 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
        }
 #endif
        io_free_file_tables(&ctx->file_table);
+       io_file_table_set_alloc_range(ctx, 0, 0);
        io_rsrc_data_free(ctx->file_data);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
@@ -1235,7 +1236,13 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
                        }
                }
                if (folio) {
-                       folio_put_refs(folio, nr_pages - 1);
+                       /*
+                        * The pages are bound to the folio, it doesn't
+                        * actually unpin them but drops all but one reference,
+                        * which is usually put down by io_buffer_unmap().
+                        * Note, needs a better helper.
+                        */
+                       unpin_user_pages(&pages[1], nr_pages - 1);
                        nr_pages = 1;
                }
        }
index 0119d3f1a55698a9958d26d9baadb68e3028141a..9db4bc1f521a3162d3480000cdc82e9a1aacfe62 100644 (file)
@@ -233,7 +233,6 @@ static int io_sq_thread(void *data)
                set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
        else
                set_cpus_allowed_ptr(current, cpu_online_mask);
-       current->flags |= PF_NO_SETAFFINITY;
 
        mutex_lock(&sqd->lock);
        while (1) {
index 2e4c483075d331f70b2dafa11797e8f7c3fac757..9a1dee5718724a109461560b39fc789132d6a13b 100644 (file)
 static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
 {
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+       unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
 
-       ioucmd->task_work_cb(ioucmd);
+       ioucmd->task_work_cb(ioucmd, issue_flags);
 }
 
 void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
-                       void (*task_work_cb)(struct io_uring_cmd *))
+                       void (*task_work_cb)(struct io_uring_cmd *, unsigned))
 {
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
 
@@ -42,7 +43,8 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
  */
-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
+void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
+                      unsigned issue_flags)
 {
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
 
@@ -56,7 +58,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
                /* order with io_iopoll_req_issued() checking ->iopoll_complete */
                smp_store_release(&req->iopoll_completed, 1);
        else
-               io_req_complete_post(req, 0);
+               io_req_complete_post(req, issue_flags);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
index b297e9f60ca1015ec0d788b54f6381b2e9140d6a..e2d256c820723ffb7f2264fc45bfcbc841036974 100644 (file)
@@ -972,7 +972,7 @@ static int __init bpf_jit_charge_init(void)
 {
        /* Only used as heuristic here to derive limit. */
        bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
-       bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
+       bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
                                            PAGE_SIZE), LONG_MAX);
        return 0;
 }
index 272563a0b7702a98f4060be952b85570934e210c..d517d13878cfe720ba62f7b04cf4f8801cd63d81 100644 (file)
@@ -3826,6 +3826,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
                                                continue;
                                        if (type == STACK_MISC)
                                                continue;
+                                       if (type == STACK_INVALID && env->allow_uninit_stack)
+                                               continue;
                                        verbose(env, "invalid read from stack off %d+%d size %d\n",
                                                off, i, size);
                                        return -EACCES;
@@ -3863,6 +3865,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
                                continue;
                        if (type == STACK_ZERO)
                                continue;
+                       if (type == STACK_INVALID && env->allow_uninit_stack)
+                               continue;
                        verbose(env, "invalid read from stack off %d+%d size %d\n",
                                off, i, size);
                        return -EACCES;
@@ -5754,7 +5758,8 @@ static int check_stack_range_initialized(
                stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
                if (*stype == STACK_MISC)
                        goto mark;
-               if (*stype == STACK_ZERO) {
+               if ((*stype == STACK_ZERO) ||
+                   (*stype == STACK_INVALID && env->allow_uninit_stack)) {
                        if (clobber) {
                                /* helper can write anything into the stack */
                                *stype = STACK_MISC;
@@ -13936,6 +13941,10 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
                if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
                        continue;
 
+               if (env->allow_uninit_stack &&
+                   old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
+                       continue;
+
                /* explored stack has more populated slots than current stack
                 * and these slots were used
                 */
index 55551989d9da50215c2c0954d55baa2a83bc03a9..fb50f29d9b361db607391c0228c9ee9a6f713b38 100644 (file)
@@ -152,7 +152,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t,  pid, unsigned int, len,
        if (len & (sizeof(compat_ulong_t)-1))
                return -EINVAL;
 
-       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
 
        ret = sched_getaffinity(pid, mask);
index 846add8394c4159c8e5accb77ecc10e463519e3b..be61332c66b54a02b37d7a1847001b23a82d910e 100644 (file)
@@ -21,7 +21,7 @@ static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
        arch_enter_from_user_mode(regs);
        lockdep_hardirqs_off(CALLER_ADDR0);
 
-       CT_WARN_ON(ct_state() != CONTEXT_USER);
+       CT_WARN_ON(__ct_state() != CONTEXT_USER);
        user_exit_irqoff();
 
        instrumentation_begin();
@@ -192,13 +192,14 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 
 static void exit_to_user_mode_prepare(struct pt_regs *regs)
 {
-       unsigned long ti_work = read_thread_flags();
+       unsigned long ti_work;
 
        lockdep_assert_irqs_disabled();
 
        /* Flush pending rcuog wakeup before the last need_resched() check */
        tick_nohz_user_enter_prepare();
 
+       ti_work = read_thread_flags();
        if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                ti_work = exit_to_user_mode_loop(regs, ti_work);
 
index f79fd8b87f75edc22453a47dee8c9b14855e8eeb..fb3e436bcd4ac7e8a04da3e16123f3d8b0d066a9 100644 (file)
@@ -2163,7 +2163,7 @@ static void perf_group_detach(struct perf_event *event)
                /* Inherit group flags from the previous leader */
                sibling->group_caps = event->group_caps;
 
-               if (!RB_EMPTY_NODE(&event->group_node)) {
+               if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
                        add_event_to_groups(sibling, event->ctx);
 
                        if (sibling->state == PERF_EVENT_STATE_ACTIVE)
@@ -3872,7 +3872,7 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
        if (likely(!ctx->nr_events))
                return;
 
-       if (is_active ^ EVENT_TIME) {
+       if (!(is_active & EVENT_TIME)) {
                /* start ctx time */
                __update_context_time(ctx, false);
                perf_cgroup_set_timestamp(cpuctx);
@@ -9187,7 +9187,7 @@ static void perf_event_bpf_output(struct perf_event *event, void *data)
 
        perf_event_header__init_id(&bpf_event->event_id.header,
                                   &sample, event);
-       ret = perf_output_begin(&handle, data, event,
+       ret = perf_output_begin(&handle, &sample, event,
                                bpf_event->event_id.header.size);
        if (ret)
                return;
index d8cda4c6de6c705051ffa2c14aa83a68c989da2f..c0257cbee0931acff05a980d428af2172fcb6d84 100644 (file)
@@ -755,11 +755,6 @@ static void check_mm(struct mm_struct *mm)
        for (i = 0; i < NR_MM_COUNTERS; i++) {
                long x = percpu_counter_sum(&mm->rss_stat[i]);
 
-               if (likely(!x))
-                       continue;
-
-               /* Making sure this is not due to race with CPU offlining. */
-               x = percpu_counter_sum_all(&mm->rss_stat[i]);
                if (unlikely(x))
                        pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
                                 mm, resident_page_types[i], x);
index 8cf70f068d92d31fda0d147782d55760c6854b5a..a45f3dfc8d14169ae3ea4ee875897213f81236be 100644 (file)
@@ -16,6 +16,6 @@ obj-y := core.o debugfs.o report.o
 KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
 obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
 
-CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
+CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -fno-omit-frame-pointer
 CFLAGS_kcsan_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
 obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
index af017e038b482f47b0783efe6c089b1c5c9bfab0..0d18c3969f90400e5c91e1e0132268dcff5feb65 100644 (file)
@@ -2084,6 +2084,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       if (task_on_rq_migrating(p))
+               flags |= ENQUEUE_MIGRATED;
+
        enqueue_task(rq, p, flags);
 
        p->on_rq = TASK_ON_RQ_QUEUED;
@@ -8414,14 +8417,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
        if (len & (sizeof(unsigned long)-1))
                return -EINVAL;
 
-       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
 
        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
                unsigned int retlen = min(len, cpumask_size());
 
-               if (copy_to_user(user_mask_ptr, mask, retlen))
+               if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
                        ret = -EFAULT;
                else
                        ret = retlen;
index 7a1b1f855b9635e75282913850b70ffba2006322..6986ea31c9844719cf083ee1b60e3163add9c9db 100644 (file)
@@ -4648,11 +4648,33 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #endif
 }
 
+static inline bool entity_is_long_sleeper(struct sched_entity *se)
+{
+       struct cfs_rq *cfs_rq;
+       u64 sleep_time;
+
+       if (se->exec_start == 0)
+               return false;
+
+       cfs_rq = cfs_rq_of(se);
+
+       sleep_time = rq_clock_task(rq_of(cfs_rq));
+
+       /* Happen while migrating because of clock task divergence */
+       if (sleep_time <= se->exec_start)
+               return false;
+
+       sleep_time -= se->exec_start;
+       if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
+               return true;
+
+       return false;
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
        u64 vruntime = cfs_rq->min_vruntime;
-       u64 sleep_time;
 
        /*
         * The 'current' period is already promised to the current tasks,
@@ -4684,13 +4706,24 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
        /*
         * Pull vruntime of the entity being placed to the base level of
-        * cfs_rq, to prevent boosting it if placed backwards.  If the entity
-        * slept for a long time, don't even try to compare its vruntime with
-        * the base as it may be too far off and the comparison may get
-        * inversed due to s64 overflow.
-        */
-       sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
-       if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
+        * cfs_rq, to prevent boosting it if placed backwards.
+        * However, min_vruntime can advance much faster than real time, with
+        * the extreme being when an entity with the minimal weight always runs
+        * on the cfs_rq. If the waking entity slept for a long time, its
+        * vruntime difference from min_vruntime may overflow s64 and their
+        * comparison may get inversed, so ignore the entity's original
+        * vruntime in that case.
+        * The maximal vruntime speedup is given by the ratio of normal to
+        * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
+        * When placing a migrated waking entity, its exec_start has been set
+        * from a different rq. In order to take into account a possible
+        * divergence between new and prev rq's clocks task because of irq and
+        * stolen time, we take an additional margin.
+        * So, cutting off on the sleep time of
+        *     2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
+        * should be safe.
+        */
+       if (entity_is_long_sleeper(se))
                se->vruntime = vruntime;
        else
                se->vruntime = max_vruntime(se->vruntime, vruntime);
@@ -4770,6 +4803,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
        if (flags & ENQUEUE_WAKEUP)
                place_entity(cfs_rq, se, 0);
+       /* Entity has migrated, no longer consider this task hot */
+       if (flags & ENQUEUE_MIGRATED)
+               se->exec_start = 0;
 
        check_schedstat_required();
        update_stats_enqueue_fair(cfs_rq, se, flags);
@@ -7657,9 +7693,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
        /* Tell new CPU we are migrated */
        se->avg.last_update_time = 0;
 
-       /* We have migrated, no longer consider this task hot */
-       se->exec_start = 0;
-
        update_scan_period(p, new_cpu);
 }
 
index 29baa97d0d534284237a4376dda1d88aca6f7c06..0feea145bb2995e17ef79cad1596867be5f8caca 100644 (file)
@@ -1564,7 +1564,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
        key.flags = end;        /* overload flags, as it is unsigned long */
 
        for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               if (end < pg->records[0].ip ||
+               if (pg->index == 0 ||
+                   end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
@@ -2591,7 +2592,7 @@ static void call_direct_funcs(unsigned long ip, unsigned long pip,
        arch_ftrace_set_direct_caller(fregs, addr);
 }
 
-struct ftrace_ops direct_ops = {
+static struct ftrace_ops direct_ops = {
        .func           = call_direct_funcs,
        .flags          = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
                          | FTRACE_OPS_FL_PERMANENT,
index 4850fdfe27f1cb040118eed41c8a3036c4926d2c..5a4b722b50451bfdee42769a6d3be39c055690d1 100644 (file)
@@ -146,7 +146,7 @@ static int __init test_gen_kprobe_cmd(void)
        if (trace_event_file_is_valid(gen_kprobe_test))
                gen_kprobe_test = NULL;
        /* We got an error after creating the event, delete it */
-       ret = kprobe_event_delete("gen_kprobe_test");
+       kprobe_event_delete("gen_kprobe_test");
        goto out;
 }
 
@@ -211,7 +211,7 @@ static int __init test_gen_kretprobe_cmd(void)
        if (trace_event_file_is_valid(gen_kretprobe_test))
                gen_kretprobe_test = NULL;
        /* We got an error after creating the event, delete it */
-       ret = kprobe_event_delete("gen_kretprobe_test");
+       kprobe_event_delete("gen_kretprobe_test");
        goto out;
 }
 
index af50d931b020227991e2e63f4e961c9d0618323f..c6f47b6cfd5f1b9420e2e0040a1734246fa875ef 100644 (file)
@@ -354,10 +354,6 @@ static void rb_init_page(struct buffer_data_page *bpage)
        local_set(&bpage->commit, 0);
 }
 
-/*
- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
- * this issue out.
- */
 static void free_buffer_page(struct buffer_page *bpage)
 {
        free_page((unsigned long)bpage->page);
index 45551c7b4c36588fac4378f04fe4a4d7ef8e36a9..937e9676dfd42fbff3e4efa02aab169325f10d84 100644 (file)
@@ -5167,6 +5167,8 @@ loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
 static const struct file_operations tracing_fops = {
        .open           = tracing_open,
        .read           = seq_read,
+       .read_iter      = seq_read_iter,
+       .splice_read    = generic_file_splice_read,
        .write          = tracing_write_stub,
        .llseek         = tracing_lseek,
        .release        = tracing_release,
index 89877a18f93307fce29789166b68a60d9a7f0c1e..486cca3c2b75450f23f8b680045421cf234d232f 100644 (file)
@@ -1331,6 +1331,9 @@ static const char *hist_field_name(struct hist_field *field,
 {
        const char *field_name = "";
 
+       if (WARN_ON_ONCE(!field))
+               return field_name;
+
        if (level > 1)
                return field_name;
 
@@ -4235,6 +4238,15 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
                goto out;
        }
 
+       /* Some types cannot be a value */
+       if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+                                HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+                                HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+                                HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
+               hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+               ret = -EINVAL;
+       }
+
        hist_data->fields[val_idx] = hist_field;
 
        ++hist_data->n_vals;
index d440ddd5fd8b29adf605075004fc5edd9a7ee17c..2f37a6e68aa9f851f19b968eeafdb1034d3efae2 100644 (file)
@@ -339,7 +339,7 @@ static void move_to_next_cpu(void)
        cpumask_clear(current_mask);
        cpumask_set_cpu(next_cpu, current_mask);
 
-       sched_setaffinity(0, current_mask);
+       set_cpus_allowed_ptr(current, current_mask);
        return;
 
  change_mode:
@@ -446,7 +446,7 @@ static int start_single_kthread(struct trace_array *tr)
 
        }
 
-       sched_setaffinity(kthread->pid, current_mask);
+       set_cpus_allowed_ptr(kthread, current_mask);
 
        kdata->kthread = kthread;
        wake_up_process(kthread);
@@ -492,6 +492,10 @@ static int start_cpu_kthread(unsigned int cpu)
 {
        struct task_struct *kthread;
 
+       /* Do not start a new hwlatd thread if it is already running */
+       if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
+               return 0;
+
        kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
        if (IS_ERR(kthread)) {
                pr_err(BANNER "could not start sampling thread\n");
@@ -584,9 +588,6 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
         */
        cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
 
-       for_each_online_cpu(cpu)
-               per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
-
        for_each_cpu(cpu, current_mask) {
                retval = start_cpu_kthread(cpu);
                if (retval)
index 04f0fdae19a1c0382d6e0ec4f415a36d86d610f7..9176bb7a9bb459399b7518e3a1caf24e01d7a16f 100644 (file)
@@ -217,7 +217,7 @@ struct osnoise_variables {
 /*
  * Per-cpu runtime information.
  */
-DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
+static DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
 
 /*
  * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU
@@ -240,7 +240,7 @@ struct timerlat_variables {
        u64                     count;
 };
 
-DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
+static DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
 
 /*
  * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU
@@ -332,7 +332,7 @@ struct timerlat_sample {
 /*
  * Protect the interface.
  */
-struct mutex interface_lock;
+static struct mutex interface_lock;
 
 /*
  * Tracer data.
@@ -2239,8 +2239,8 @@ static struct trace_min_max_param osnoise_print_stack = {
 /*
  * osnoise/timerlat_period: min 100 us, max 1 s
  */
-u64 timerlat_min_period = 100;
-u64 timerlat_max_period = 1000000;
+static u64 timerlat_min_period = 100;
+static u64 timerlat_max_period = 1000000;
 static struct trace_min_max_param timerlat_period = {
        .lock   = &interface_lock,
        .val    = &osnoise_data.timerlat_period,
index f9d33efa6d09060445dfbf481c01f5594dc37417..f15ac666e9d38bd23bb0cb4ed08beb626540a0c4 100644 (file)
@@ -31,6 +31,7 @@ MODULE_PARM_DESC(iterations,
 
 static void dhry_benchmark(void)
 {
+       unsigned int cpu = get_cpu();
        int i, n;
 
        if (iterations > 0) {
@@ -45,9 +46,10 @@ static void dhry_benchmark(void)
        }
 
 report:
+       put_cpu();
        if (n >= 0)
-               pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n",
-                       smp_processor_id(), n, n / DHRY_VAX);
+               pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
+                       n, n / DHRY_VAX);
        else if (n == -EAGAIN)
                pr_err("Please increase the number of iterations\n");
        else
index c10920e667889c1a2e86f5916b9998b21893ba77..32f99e9a670e64fcdc8e9a7e67d29df199486a69 100644 (file)
@@ -182,6 +182,15 @@ unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned l
 EXPORT_SYMBOL(_find_next_andnot_bit);
 #endif
 
+#ifndef find_next_or_bit
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+                                       unsigned long nbits, unsigned long start)
+{
+       return FIND_NEXT_BIT(addr1[idx] | addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_or_bit);
+#endif
+
 #ifndef find_next_zero_bit
 unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
                                         unsigned long start)
index 646297cae5d16dea90e9fbc0b22d02f2198c6b93..9e2735cbc2b49441d03d66fcf04b6d07ebed35c6 100644 (file)
@@ -5099,35 +5099,21 @@ static inline bool mas_rewind_node(struct ma_state *mas)
  */
 static inline bool mas_skip_node(struct ma_state *mas)
 {
-       unsigned char slot, slot_count;
-       unsigned long *pivots;
-       enum maple_type mt;
+       if (mas_is_err(mas))
+               return false;
 
-       mt = mte_node_type(mas->node);
-       slot_count = mt_slots[mt] - 1;
        do {
                if (mte_is_root(mas->node)) {
-                       slot = mas->offset;
-                       if (slot > slot_count) {
+                       if (mas->offset >= mas_data_end(mas)) {
                                mas_set_err(mas, -EBUSY);
                                return false;
                        }
                } else {
                        mas_ascend(mas);
-                       slot = mas->offset;
-                       mt = mte_node_type(mas->node);
-                       slot_count = mt_slots[mt] - 1;
                }
-       } while (slot > slot_count);
-
-       mas->offset = ++slot;
-       pivots = ma_pivots(mas_mn(mas), mt);
-       if (slot > 0)
-               mas->min = pivots[slot - 1] + 1;
-
-       if (slot <= slot_count)
-               mas->max = pivots[slot];
+       } while (mas->offset >= mas_data_end(mas));
 
+       mas->offset++;
        return true;
 }
 
index dba56c5c1837912132b5e01bd66eba8a8e3a01fa..5004463c4f9f1243674ca77c2aea8e7050edfbda 100644 (file)
@@ -122,8 +122,19 @@ void percpu_counter_sync(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_sync);
 
-static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
-                             const struct cpumask *cpu_mask)
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive().
+ *
+ * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
+ * from CPUs that are in the process of being taken offline. Dying cpus have
+ * been removed from the online mask, but may not have had the hotplug dead
+ * notifier called to fold the percpu count back into the global counter sum.
+ * By including dying CPUs in the iteration mask, we avoid this race condition
+ * so __percpu_counter_sum() just does the right thing when CPUs are being taken
+ * offline.
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
        s64 ret;
        int cpu;
@@ -131,35 +142,15 @@ static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
 
        raw_spin_lock_irqsave(&fbc->lock, flags);
        ret = fbc->count;
-       for_each_cpu(cpu, cpu_mask) {
+       for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
        return ret;
 }
-
-/*
- * Add up all the per-cpu counts, return the result.  This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
-{
-       return __percpu_counter_sum_mask(fbc, cpu_online_mask);
-}
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-/*
- * This is slower version of percpu_counter_sum as it traverses all possible
- * cpus. Use this only in the cases where accurate data is needed in the
- * presense of CPUs getting offlined.
- */
-s64 percpu_counter_sum_all(struct percpu_counter *fbc)
-{
-       return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
-}
-EXPORT_SYMBOL(percpu_counter_sum_all);
-
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key)
 {
index 3d19b1f78d7189a8a0a63241f765bbec12729d93..f1db333270e9fa4f2743a1e4c72420358eea5831 100644 (file)
@@ -2670,6 +2670,49 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
        rcu_read_unlock();
 }
 
+static noinline void check_empty_area_fill(struct maple_tree *mt)
+{
+       const unsigned long max = 0x25D78000;
+       unsigned long size;
+       int loop, shift;
+       MA_STATE(mas, mt, 0, 0);
+
+       mt_set_non_kernel(99999);
+       for (shift = 12; shift <= 16; shift++) {
+               loop = 5000;
+               size = 1 << shift;
+               while (loop--) {
+                       mas_set(&mas, 0);
+                       mas_lock(&mas);
+                       MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
+                       MT_BUG_ON(mt, mas.last != mas.index + size - 1);
+                       mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
+                       mas_unlock(&mas);
+                       mas_reset(&mas);
+               }
+       }
+
+       /* No space left. */
+       size = 0x1000;
+       rcu_read_lock();
+       MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
+       rcu_read_unlock();
+
+       /* Fill a depth 3 node to the maximum */
+       for (unsigned long i = 629440511; i <= 629440800; i += 6)
+               mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
+       /* Make space in the second-last depth 4 node */
+       mtree_erase(mt, 631668735);
+       /* Make space in the last depth 4 node */
+       mtree_erase(mt, 629506047);
+       mas_reset(&mas);
+       /* Search from just after the gap in the second-last depth 4 */
+       rcu_read_lock();
+       MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
+       rcu_read_unlock();
+       mt_set_non_kernel(0);
+}
+
 static DEFINE_MTREE(tree);
 static int maple_tree_seed(void)
 {
@@ -2926,6 +2969,11 @@ static int maple_tree_seed(void)
        check_empty_area_window(&tree);
        mtree_destroy(&tree);
 
+       mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+       check_empty_area_fill(&tree);
+       mtree_destroy(&tree);
+
+
 #if defined(BENCH)
 skip:
 #endif
index 7a5bf44839c9ce0d633362451dc875c7679f5ba9..f06df065dec01126c07a225e009981b809caf2b2 100644 (file)
@@ -84,7 +84,7 @@ static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
 
 #include <linux/kernel.h>
 
-#define assert(x) WARN_ON((x))
+#define assert(x) WARN_ON(!(x))
 
 #endif /* ZSTD_DEPS_ASSERT */
 #endif /* ZSTD_DEPS_NEED_ASSERT */
index 89b269a641c7e03aeb8f54892e6c401f04ed91a0..60958afebc41506928bcc75199af86226c786443 100644 (file)
@@ -985,7 +985,7 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32
 
 static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
                            const sortedSymbol_t* sortedList,
-                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32* rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight,
                            const U32 nbBitsBaseline)
 {
     U32* const rankVal = rankValOrigin[0];
index b9b935a9f5c0da9c6c3c5d3fc8d38fccd4fcc6ce..6b3177c947114a74723d2cd905e959fc3299237e 100644 (file)
@@ -798,7 +798,7 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
         if (srcSize == 0) return 0;
         RETURN_ERROR(dstBuffer_null, "");
     }
-    ZSTD_memcpy(dst, src, srcSize);
+    ZSTD_memmove(dst, src, srcSize);
     return srcSize;
 }
 
@@ -858,6 +858,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
 
     /* Loop on each block */
     while (1) {
+        BYTE* oBlockEnd = oend;
         size_t decodedSize;
         blockProperties_t blockProperties;
         size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
@@ -867,16 +868,34 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
         remainingSrcSize -= ZSTD_blockHeaderSize;
         RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
 
+        if (ip >= op && ip < oBlockEnd) {
+            /* We are decompressing in-place. Limit the output pointer so that we
+             * don't overwrite the block that we are currently reading. This will
+             * fail decompression if the input & output pointers aren't spaced
+             * far enough apart.
+             *
+             * This is important to set, even when the pointers are far enough
+             * apart, because ZSTD_decompressBlock_internal() can decide to store
+             * literals in the output buffer, after the block it is decompressing.
+             * Since we don't want anything to overwrite our input, we have to tell
+             * ZSTD_decompressBlock_internal to never write past ip.
+             *
+             * See ZSTD_allocateLiteralsBuffer() for reference.
+             */
+            oBlockEnd = op + (ip - op);
+        }
+
         switch(blockProperties.blockType)
         {
         case bt_compressed:
-            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming);
+            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
             break;
         case bt_raw :
+            /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
             decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
             break;
         case bt_rle :
-            decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
+            decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize);
             break;
         case bt_reserved :
         default:
index 6c655d9b5639106722baed45b3f27fae1ca20664..dd9c33fbe805273f32fbc518356d722bacf4e09b 100644 (file)
@@ -130,7 +130,6 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
                        accessed = false;
                else
                        accessed = true;
-               folio_put(folio);
                goto out;
        }
 
@@ -144,10 +143,10 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 
        if (need_lock)
                folio_unlock(folio);
-       folio_put(folio);
 
 out:
        *folio_sz = folio_size(folio);
+       folio_put(folio);
        return accessed;
 }
 
@@ -281,8 +280,8 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
                        folio_mark_accessed(folio);
                else
                        folio_deactivate(folio);
-               folio_put(folio);
                applied += folio_nr_pages(folio);
+               folio_put(folio);
        }
        return applied * PAGE_SIZE;
 }
index 4fc43859e59a31932a657cd2fac2b511c00e812b..032fb0ef9cd194064c8fb39748e6400871ed0af2 100644 (file)
@@ -2037,7 +2037,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
-       pmd_t _pmd;
+       pmd_t _pmd, old_pmd;
        int i;
 
        /*
@@ -2048,7 +2048,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
         *
         * See Documentation/mm/mmu_notifier.rst
         */
-       pmdp_huge_clear_flush(vma, haddr, pmd);
+       old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
 
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
@@ -2057,6 +2057,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
                pte_t *pte, entry;
                entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
                entry = pte_mkspecial(entry);
+               if (pmd_uffd_wp(old_pmd))
+                       entry = pte_mkuffd_wp(entry);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
index 0bb95728a7845ff2210ede9c712743cee8dc6347..2de2a58d11a101d328fbb73862bd5bcaf27a1463 100644 (file)
@@ -2,5 +2,5 @@
 
 obj-y := core.o report.o
 
-CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
+CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
 obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o
index 5349c37a5dac9fc83f9a1eec0f12a9f6c6ab4b48..79c94ee55f97b6d5f803ea290ff8e6c4491aa04a 100644 (file)
@@ -726,10 +726,14 @@ static const struct seq_operations objects_sops = {
 };
 DEFINE_SEQ_ATTRIBUTE(objects);
 
-static int __init kfence_debugfs_init(void)
+static int kfence_debugfs_init(void)
 {
-       struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
+       struct dentry *kfence_dir;
 
+       if (!READ_ONCE(kfence_enabled))
+               return 0;
+
+       kfence_dir = debugfs_create_dir("kfence", NULL);
        debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
        debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
        return 0;
@@ -883,6 +887,8 @@ static int kfence_init_late(void)
        }
 
        kfence_init_enable();
+       kfence_debugfs_init();
+
        return 0;
 }
 
index ad591b779d534127a0ccde6781db77ee32cee0dc..2b8d30068cbbd702862a91bd3ce9cfcecfce01e2 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -988,9 +988,15 @@ static int unmerge_and_remove_all_rmap_items(void)
 
                mm = mm_slot->slot.mm;
                mmap_read_lock(mm);
+
+               /*
+                * Exit right away if the mm is exiting, to avoid a lockdep
+                * issue in the maple tree.
+                */
+               if (ksm_test_exit(mm))
+                       goto mm_exiting;
+
                for_each_vma(vmi, vma) {
-                       if (ksm_test_exit(mm))
-                               break;
                        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                                continue;
                        err = unmerge_ksm_pages(vma,
@@ -999,6 +1005,7 @@ static int unmerge_and_remove_all_rmap_items(void)
                                goto error;
                }
 
+mm_exiting:
                remove_trailing_rmap_items(&mm_slot->rmap_list);
                mmap_read_unlock(mm);
 
index 98f1c11197a8c5f057ed23f5aea9cdc62536bed4..db3f154446af4e1210d7557bcb65eaa57cae03e8 100644 (file)
@@ -1112,9 +1112,8 @@ static void migrate_folio_done(struct folio *src,
 /* Obtain the lock on page, remove all ptes. */
 static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
                               unsigned long private, struct folio *src,
-                              struct folio **dstp, int force, bool avoid_force_lock,
-                              enum migrate_mode mode, enum migrate_reason reason,
-                              struct list_head *ret)
+                              struct folio **dstp, enum migrate_mode mode,
+                              enum migrate_reason reason, struct list_head *ret)
 {
        struct folio *dst;
        int rc = -EAGAIN;
@@ -1144,7 +1143,7 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
        dst->private = NULL;
 
        if (!folio_trylock(src)) {
-               if (!force || mode == MIGRATE_ASYNC)
+               if (mode == MIGRATE_ASYNC)
                        goto out;
 
                /*
@@ -1163,17 +1162,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
                if (current->flags & PF_MEMALLOC)
                        goto out;
 
-               /*
-                * We have locked some folios and are going to wait to lock
-                * this folio.  To avoid a potential deadlock, let's bail
-                * out and not do that. The locked folios will be moved and
-                * unlocked, then we can wait to lock this folio.
-                */
-               if (avoid_force_lock) {
-                       rc = -EDEADLOCK;
-                       goto out;
-               }
-
                folio_lock(src);
        }
        locked = true;
@@ -1193,8 +1181,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
                        rc = -EBUSY;
                        goto out;
                }
-               if (!force)
-                       goto out;
                folio_wait_writeback(src);
        }
 
@@ -1253,7 +1239,7 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
                /* Establish migration ptes */
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                               !folio_test_ksm(src) && !anon_vma, src);
-               try_to_migrate(src, TTU_BATCH_FLUSH);
+               try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
                page_was_mapped = 1;
        }
 
@@ -1267,7 +1253,7 @@ out:
         * A folio that has not been unmapped will be restored to
         * right list unless we want to retry.
         */
-       if (rc == -EAGAIN || rc == -EDEADLOCK)
+       if (rc == -EAGAIN)
                ret = NULL;
 
        migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
@@ -1508,6 +1494,9 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f
 #define NR_MAX_BATCHED_MIGRATION       512
 #endif
 #define NR_MAX_MIGRATE_PAGES_RETRY     10
+#define NR_MAX_MIGRATE_ASYNC_RETRY     3
+#define NR_MAX_MIGRATE_SYNC_RETRY                                      \
+       (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
 
 struct migrate_pages_stats {
        int nr_succeeded;       /* Normal and large folios migrated successfully, in
@@ -1618,13 +1607,19 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
 /*
  * migrate_pages_batch() first unmaps folios in the from list as many as
  * possible, then move the unmapped folios.
+ *
+ * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
+ * lock or bit when we have locked more than one folio, which may cause a
+ * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
+ * length of the from list must be <= 1.
  */
 static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
                free_page_t put_new_page, unsigned long private,
                enum migrate_mode mode, int reason, struct list_head *ret_folios,
-               struct migrate_pages_stats *stats)
+               struct list_head *split_folios, struct migrate_pages_stats *stats,
+               int nr_pass)
 {
-       int retry;
+       int retry = 1;
        int large_retry = 1;
        int thp_retry = 1;
        int nr_failed = 0;
@@ -1634,21 +1629,15 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
        bool is_large = false;
        bool is_thp = false;
        struct folio *folio, *folio2, *dst = NULL, *dst2;
-       int rc, rc_saved, nr_pages;
-       LIST_HEAD(split_folios);
+       int rc, rc_saved = 0, nr_pages;
        LIST_HEAD(unmap_folios);
        LIST_HEAD(dst_folios);
        bool nosplit = (reason == MR_NUMA_MISPLACED);
-       bool no_split_folio_counting = false;
-       bool avoid_force_lock;
 
-retry:
-       rc_saved = 0;
-       avoid_force_lock = false;
-       retry = 1;
-       for (pass = 0;
-            pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
-            pass++) {
+       VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
+                       !list_empty(from) && !list_is_singular(from));
+
+       for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
                retry = 0;
                large_retry = 0;
                thp_retry = 0;
@@ -1679,7 +1668,7 @@ retry:
                        if (!thp_migration_supported() && is_thp) {
                                nr_large_failed++;
                                stats->nr_thp_failed++;
-                               if (!try_split_folio(folio, &split_folios)) {
+                               if (!try_split_folio(folio, split_folios)) {
                                        stats->nr_thp_split++;
                                        continue;
                                }
@@ -1689,15 +1678,13 @@ retry:
                        }
 
                        rc = migrate_folio_unmap(get_new_page, put_new_page, private,
-                                                folio, &dst, pass > 2, avoid_force_lock,
-                                                mode, reason, ret_folios);
+                                                folio, &dst, mode, reason, ret_folios);
                        /*
                         * The rules are:
                         *      Success: folio will be freed
                         *      Unmap: folio will be put on unmap_folios list,
                         *             dst folio put on dst_folios list
                         *      -EAGAIN: stay on the from list
-                        *      -EDEADLOCK: stay on the from list
                         *      -ENOMEM: stay on the from list
                         *      Other errno: put on ret_folios list
                         */
@@ -1712,7 +1699,7 @@ retry:
                                        stats->nr_thp_failed += is_thp;
                                        /* Large folio NUMA faulting doesn't split to retry. */
                                        if (!nosplit) {
-                                               int ret = try_split_folio(folio, &split_folios);
+                                               int ret = try_split_folio(folio, split_folios);
 
                                                if (!ret) {
                                                        stats->nr_thp_split += is_thp;
@@ -1729,18 +1716,11 @@ retry:
                                                        break;
                                                }
                                        }
-                               } else if (!no_split_folio_counting) {
+                               } else {
                                        nr_failed++;
                                }
 
                                stats->nr_failed_pages += nr_pages + nr_retry_pages;
-                               /*
-                                * There might be some split folios of fail-to-migrate large
-                                * folios left in split_folios list. Move them to ret_folios
-                                * list so that they could be put back to the right list by
-                                * the caller otherwise the folio refcnt will be leaked.
-                                */
-                               list_splice_init(&split_folios, ret_folios);
                                /* nr_failed isn't updated for not used */
                                nr_large_failed += large_retry;
                                stats->nr_thp_failed += thp_retry;
@@ -1749,19 +1729,11 @@ retry:
                                        goto out;
                                else
                                        goto move;
-                       case -EDEADLOCK:
-                               /*
-                                * The folio cannot be locked for potential deadlock.
-                                * Go move (and unlock) all locked folios.  Then we can
-                                * try again.
-                                */
-                               rc_saved = rc;
-                               goto move;
                        case -EAGAIN:
                                if (is_large) {
                                        large_retry++;
                                        thp_retry += is_thp;
-                               } else if (!no_split_folio_counting) {
+                               } else {
                                        retry++;
                                }
                                nr_retry_pages += nr_pages;
@@ -1771,11 +1743,6 @@ retry:
                                stats->nr_thp_succeeded += is_thp;
                                break;
                        case MIGRATEPAGE_UNMAP:
-                               /*
-                                * We have locked some folios, don't force lock
-                                * to avoid deadlock.
-                                */
-                               avoid_force_lock = true;
                                list_move_tail(&folio->lru, &unmap_folios);
                                list_add_tail(&dst->lru, &dst_folios);
                                break;
@@ -1789,7 +1756,7 @@ retry:
                                if (is_large) {
                                        nr_large_failed++;
                                        stats->nr_thp_failed += is_thp;
-                               } else if (!no_split_folio_counting) {
+                               } else {
                                        nr_failed++;
                                }
 
@@ -1807,9 +1774,7 @@ move:
        try_to_unmap_flush();
 
        retry = 1;
-       for (pass = 0;
-            pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
-            pass++) {
+       for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
                retry = 0;
                large_retry = 0;
                thp_retry = 0;
@@ -1838,7 +1803,7 @@ move:
                                if (is_large) {
                                        large_retry++;
                                        thp_retry += is_thp;
-                               } else if (!no_split_folio_counting) {
+                               } else {
                                        retry++;
                                }
                                nr_retry_pages += nr_pages;
@@ -1851,7 +1816,7 @@ move:
                                if (is_large) {
                                        nr_large_failed++;
                                        stats->nr_thp_failed += is_thp;
-                               } else if (!no_split_folio_counting) {
+                               } else {
                                        nr_failed++;
                                }
 
@@ -1888,30 +1853,52 @@ out:
                dst2 = list_next_entry(dst, lru);
        }
 
-       /*
-        * Try to migrate split folios of fail-to-migrate large folios, no
-        * nr_failed counting in this round, since all split folios of a
-        * large folio is counted as 1 failure in the first round.
-        */
-       if (rc >= 0 && !list_empty(&split_folios)) {
-               /*
-                * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
-                * retries) to ret_folios to avoid migrating them again.
-                */
-               list_splice_init(from, ret_folios);
-               list_splice_init(&split_folios, from);
-               no_split_folio_counting = true;
-               goto retry;
-       }
+       return rc;
+}
 
+static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
+               free_page_t put_new_page, unsigned long private,
+               enum migrate_mode mode, int reason, struct list_head *ret_folios,
+               struct list_head *split_folios, struct migrate_pages_stats *stats)
+{
+       int rc, nr_failed = 0;
+       LIST_HEAD(folios);
+       struct migrate_pages_stats astats;
+
+       memset(&astats, 0, sizeof(astats));
+       /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
+       rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
+                                reason, &folios, split_folios, &astats,
+                                NR_MAX_MIGRATE_ASYNC_RETRY);
+       stats->nr_succeeded += astats.nr_succeeded;
+       stats->nr_thp_succeeded += astats.nr_thp_succeeded;
+       stats->nr_thp_split += astats.nr_thp_split;
+       if (rc < 0) {
+               stats->nr_failed_pages += astats.nr_failed_pages;
+               stats->nr_thp_failed += astats.nr_thp_failed;
+               list_splice_tail(&folios, ret_folios);
+               return rc;
+       }
+       stats->nr_thp_failed += astats.nr_thp_split;
+       nr_failed += astats.nr_thp_split;
        /*
-        * We have unlocked all locked folios, so we can force lock now, let's
-        * try again.
+        * Fall back to migrate all failed folios one by one synchronously. All
+        * failed folios except split THPs will be retried, so their failure
+        * isn't counted.
         */
-       if (rc == -EDEADLOCK)
-               goto retry;
+       list_splice_tail_init(&folios, from);
+       while (!list_empty(from)) {
+               list_move(from->next, &folios);
+               rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
+                                        private, mode, reason, ret_folios,
+                                        split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
+               list_splice_tail_init(&folios, ret_folios);
+               if (rc < 0)
+                       return rc;
+               nr_failed += rc;
+       }
 
-       return rc;
+       return nr_failed;
 }
 
 /*
@@ -1949,6 +1936,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
        struct folio *folio, *folio2;
        LIST_HEAD(folios);
        LIST_HEAD(ret_folios);
+       LIST_HEAD(split_folios);
        struct migrate_pages_stats stats;
 
        trace_mm_migrate_pages_start(mode, reason);
@@ -1959,6 +1947,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                                     mode, reason, &stats, &ret_folios);
        if (rc_gather < 0)
                goto out;
+
 again:
        nr_pages = 0;
        list_for_each_entry_safe(folio, folio2, from, lru) {
@@ -1969,20 +1958,36 @@ again:
                }
 
                nr_pages += folio_nr_pages(folio);
-               if (nr_pages > NR_MAX_BATCHED_MIGRATION)
+               if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
                        break;
        }
-       if (nr_pages > NR_MAX_BATCHED_MIGRATION)
-               list_cut_before(&folios, from, &folio->lru);
+       if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
+               list_cut_before(&folios, from, &folio2->lru);
        else
                list_splice_init(from, &folios);
-       rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
-                                mode, reason, &ret_folios, &stats);
+       if (mode == MIGRATE_ASYNC)
+               rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
+                                        mode, reason, &ret_folios, &split_folios, &stats,
+                                        NR_MAX_MIGRATE_PAGES_RETRY);
+       else
+               rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
+                                       mode, reason, &ret_folios, &split_folios, &stats);
        list_splice_tail_init(&folios, &ret_folios);
        if (rc < 0) {
                rc_gather = rc;
+               list_splice_tail(&split_folios, &ret_folios);
                goto out;
        }
+       if (!list_empty(&split_folios)) {
+               /*
+                * Failure isn't counted since all split folios of a large folio
+                * is counted as 1 failure already.  And, we only try to migrate
+                * with minimal effort, force MIGRATE_ASYNC mode and retry once.
+                */
+               migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
+                                   MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
+               list_splice_tail_init(&split_folios, &ret_folios);
+       }
        rc_gather += rc;
        if (!list_empty(from))
                goto again;
index cd69b9db00812655c42fdc91957d6826bd97d9d6..d359650b0f75b9c099e75ecd994a0b82ac38a9fa 100644 (file)
@@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
         * Hugepages under user process are always in RAM and never
         * swapped out, but theoretically it needs to be checked.
         */
-       present = pte && !huge_pte_none(huge_ptep_get(pte));
+       present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
        for (; addr != end; vec++, addr += PAGE_SIZE)
                *vec = present;
        walk->private = vec;
index 740b54be3ed4140f16a6731a275ab49f3a8b256f..ad499f7b767fae4e59837cfdc49a933b1218873f 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2621,12 +2621,7 @@ cannot_expand:
 
        if (map_deny_write_exec(vma, vma->vm_flags)) {
                error = -EACCES;
-               if (file)
-                       goto close_and_free_vma;
-               else if (vma->vm_file)
-                       goto unmap_and_free_vma;
-               else
-                       goto free_vma;
+               goto close_and_free_vma;
        }
 
        /* Allow architectures to sanity-check the vm_flags */
index 231929f119d958f73aba0680b6ed8bae8493b35d..13e84d8c0797a9394caf65d0a0bba52f42338753 100644 (file)
@@ -805,7 +805,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 
                if (map_deny_write_exec(vma, newflags)) {
                        error = -EACCES;
-                       goto out;
+                       break;
                }
 
                /* Allow architectures to sanity-check the new flags */
index ac1fc986af44c46736d4891dea771473c3bee7f1..7136c36c5d01e456d32c1718c43491f9d5d59e6d 100644 (file)
@@ -1398,6 +1398,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                        unsigned int order, bool check_free, fpi_t fpi_flags)
 {
        int bad = 0;
+       bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
        bool init = want_init_on_free();
 
        VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1470,7 +1471,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         * With hardware tag-based KASAN, memory tags must be set before the
         * page becomes unavailable via debug_pagealloc or arch_free_page.
         */
-       if (!should_skip_kasan_poison(page, fpi_flags)) {
+       if (!skip_kasan_poison) {
                kasan_poison_pages(page, order, init);
 
                /* Memory is already initialized if KASAN did it internally. */
index dabc2a671fc6f7ffc5da481a3b7a9574500cdb0a..edbe722fb906481fc151f7038652894b85b5b5cc 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -839,7 +839,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
        return 0;
 }
 
-#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
+#if defined(CONFIG_NUMA) || defined(CONFIG_SMP)
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
index ef910bf349e1361e64edc0ed0166605160a0fe52..bef6cf2b4d46da5d9f4eedc60d763f00389f5efa 100644 (file)
@@ -2883,6 +2883,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                unsigned int order, unsigned int nr_pages, struct page **pages)
 {
        unsigned int nr_allocated = 0;
+       gfp_t alloc_gfp = gfp;
+       bool nofail = false;
        struct page *page;
        int i;
 
@@ -2893,6 +2895,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
         * more permissive.
         */
        if (!order) {
+               /* bulk allocator doesn't support nofail req. officially */
                gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
 
                while (nr_allocated < nr_pages) {
@@ -2931,20 +2934,35 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        if (nr != nr_pages_request)
                                break;
                }
+       } else if (gfp & __GFP_NOFAIL) {
+               /*
+                * Higher order nofail allocations are really expensive and
+                * potentially dangerous (pre-mature OOM, disruptive reclaim
+                * and compaction etc.
+                */
+               alloc_gfp &= ~__GFP_NOFAIL;
+               nofail = true;
        }
 
        /* High-order pages or fallback path if "bulk" fails. */
-
        while (nr_allocated < nr_pages) {
                if (fatal_signal_pending(current))
                        break;
 
                if (nid == NUMA_NO_NODE)
-                       page = alloc_pages(gfp, order);
+                       page = alloc_pages(alloc_gfp, order);
                else
-                       page = alloc_pages_node(nid, gfp, order);
-               if (unlikely(!page))
-                       break;
+                       page = alloc_pages_node(nid, alloc_gfp, order);
+               if (unlikely(!page)) {
+                       if (!nofail)
+                               break;
+
+                       /* fall back to the zero order allocations */
+                       alloc_gfp |= __GFP_NOFAIL;
+                       order = 0;
+                       continue;
+               }
+
                /*
                 * Higher order allocations must be able to be treated as
                 * indepdenent small pages by callers (as they can with
index b65c3aabcd53665fa32bcaee888b3ecdaeb76b07..334e308451f5397fb4cda8b8b6f8ef724aade2c1 100644 (file)
@@ -2871,10 +2871,25 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
                return -ENXIO;
        }
 
-       if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
-           hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
-           hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
-           hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
+       switch (hci_skb_pkt_type(skb)) {
+       case HCI_EVENT_PKT:
+               break;
+       case HCI_ACLDATA_PKT:
+               /* Detect if ISO packet has been sent as ACL */
+               if (hci_conn_num(hdev, ISO_LINK)) {
+                       __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+                       __u8 type;
+
+                       type = hci_conn_lookup_type(hdev, hci_handle(handle));
+                       if (type == ISO_LINK)
+                               hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+               }
+               break;
+       case HCI_SCODATA_PKT:
+               break;
+       case HCI_ISODATA_PKT:
+               break;
+       default:
                kfree_skb(skb);
                return -EINVAL;
        }
index 117eedb6f70993a5b58987a7a7bf794aaf8e000e..5a6aa1627791b530abf33f06be87b465e7d40af3 100644 (file)
@@ -643,6 +643,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
        cancel_work_sync(&hdev->cmd_sync_work);
        cancel_work_sync(&hdev->reenable_adv_work);
 
+       mutex_lock(&hdev->cmd_sync_work_lock);
        list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
                if (entry->destroy)
                        entry->destroy(hdev, entry->data, -ECANCELED);
@@ -650,6 +651,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
                list_del(&entry->list);
                kfree(entry);
        }
+       mutex_unlock(&hdev->cmd_sync_work_lock);
 }
 
 void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
@@ -2367,6 +2369,45 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
        return err;
 }
 
+static int hci_pause_addr_resolution(struct hci_dev *hdev)
+{
+       int err;
+
+       if (!use_ll_privacy(hdev))
+               return 0;
+
+       if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
+               return 0;
+
+       /* Cannot disable addr resolution if scanning is enabled or
+        * when initiating an LE connection.
+        */
+       if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+           hci_lookup_le_connect(hdev)) {
+               bt_dev_err(hdev, "Command not allowed when scan/LE connect");
+               return -EPERM;
+       }
+
+       /* Cannot disable addr resolution if advertising is enabled. */
+       err = hci_pause_advertising_sync(hdev);
+       if (err) {
+               bt_dev_err(hdev, "Pause advertising failed: %d", err);
+               return err;
+       }
+
+       err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+       if (err)
+               bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
+                          err);
+
+       /* Return if address resolution is disabled and RPA is not used. */
+       if (!err && scan_use_rpa(hdev))
+               return err;
+
+       hci_resume_advertising_sync(hdev);
+       return err;
+}
+
 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
                                             bool extended, struct sock *sk)
 {
@@ -2402,7 +2443,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
        u8 filter_policy;
        int err;
 
-       /* Pause advertising if resolving list can be used as controllers are
+       /* Pause advertising if resolving list can be used as controllers
         * cannot accept resolving list modifications while advertising.
         */
        if (use_ll_privacy(hdev)) {
@@ -3319,6 +3360,7 @@ static const struct hci_init_stage amp_init1[] = {
        HCI_INIT(hci_read_flow_control_mode_sync),
        /* HCI_OP_READ_LOCATION_DATA */
        HCI_INIT(hci_read_location_data_sync),
+       {}
 };
 
 static int hci_init1_sync(struct hci_dev *hdev)
@@ -3353,6 +3395,7 @@ static int hci_init1_sync(struct hci_dev *hdev)
 static const struct hci_init_stage amp_init2[] = {
        /* HCI_OP_READ_LOCAL_FEATURES */
        HCI_INIT(hci_read_local_features_sync),
+       {}
 };
 
 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
@@ -5394,27 +5437,12 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
 
        cancel_interleave_scan(hdev);
 
-       /* Pause advertising since active scanning disables address resolution
-        * which advertising depend on in order to generate its RPAs.
-        */
-       if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_PRIVACY)) {
-               err = hci_pause_advertising_sync(hdev);
-               if (err) {
-                       bt_dev_err(hdev, "pause advertising failed: %d", err);
-                       goto failed;
-               }
-       }
-
-       /* Disable address resolution while doing active scanning since the
-        * accept list shall not be used and all reports shall reach the host
-        * anyway.
+       /* Pause address resolution for active scan and stop advertising if
+        * privacy is enabled.
         */
-       err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
-       if (err) {
-               bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
-                          err);
+       err = hci_pause_addr_resolution(hdev);
+       if (err)
                goto failed;
-       }
 
        /* All active scans will be done with either a resolvable private
         * address (when privacy feature has been enabled) or non-resolvable
index 24444b502e5865400d4eecbdcf68ace4b9e687b6..8d136a7301630d3172178e063b0a8c36b5540f4f 100644 (file)
@@ -1620,7 +1620,6 @@ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
 void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
 {
        struct iso_conn *conn = hcon->iso_data;
-       struct hci_iso_data_hdr *hdr;
        __u16 pb, ts, len;
 
        if (!conn)
@@ -1642,6 +1641,8 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
                }
 
                if (ts) {
+                       struct hci_iso_ts_data_hdr *hdr;
+
                        /* TODO: add timestamp to the packet? */
                        hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE);
                        if (!hdr) {
@@ -1649,15 +1650,19 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
                                goto drop;
                        }
 
+                       len = __le16_to_cpu(hdr->slen);
                } else {
+                       struct hci_iso_data_hdr *hdr;
+
                        hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE);
                        if (!hdr) {
                                BT_ERR("Frame is too short (len %d)", skb->len);
                                goto drop;
                        }
+
+                       len = __le16_to_cpu(hdr->slen);
                }
 
-               len    = __le16_to_cpu(hdr->slen);
                flags  = hci_iso_data_flags(len);
                len    = hci_iso_data_len(len);
 
index adfc3ea06d088ef6b68dc28422092351bafa7133..49926f59cc1230286deaa9c28d6da9eeb6b42352 100644 (file)
@@ -708,6 +708,17 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 }
 EXPORT_SYMBOL_GPL(l2cap_chan_del);
 
+static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
+                                l2cap_chan_func_t func, void *data)
+{
+       struct l2cap_chan *chan, *l;
+
+       list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+               if (chan->ident == id)
+                       func(chan, data);
+       }
+}
+
 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
                              void *data)
 {
@@ -775,23 +786,9 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
 
 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
 {
-       struct l2cap_conn *conn = chan->conn;
-       struct l2cap_ecred_conn_rsp rsp;
-       u16 result;
-
-       if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
-               result = L2CAP_CR_LE_AUTHORIZATION;
-       else
-               result = L2CAP_CR_LE_BAD_PSM;
-
        l2cap_state_change(chan, BT_DISCONN);
 
-       memset(&rsp, 0, sizeof(rsp));
-
-       rsp.result  = cpu_to_le16(result);
-
-       l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
-                      &rsp);
+       __l2cap_ecred_conn_rsp_defer(chan);
 }
 
 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
@@ -846,7 +843,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
                                        break;
                                case L2CAP_MODE_EXT_FLOWCTL:
                                        l2cap_chan_ecred_connect_reject(chan);
-                                       break;
+                                       return;
                                }
                        }
                }
@@ -3938,43 +3935,86 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
                       &rsp);
 }
 
-void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
+static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
 {
+       int *result = data;
+
+       if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+               return;
+
+       switch (chan->state) {
+       case BT_CONNECT2:
+               /* If channel still pending accept add to result */
+               (*result)++;
+               return;
+       case BT_CONNECTED:
+               return;
+       default:
+               /* If not connected or pending accept it has been refused */
+               *result = -ECONNREFUSED;
+               return;
+       }
+}
+
+struct l2cap_ecred_rsp_data {
        struct {
                struct l2cap_ecred_conn_rsp rsp;
-               __le16 dcid[5];
+               __le16 scid[L2CAP_ECRED_MAX_CID];
        } __packed pdu;
+       int count;
+};
+
+static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
+{
+       struct l2cap_ecred_rsp_data *rsp = data;
+
+       if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+               return;
+
+       /* Reset ident so only one response is sent */
+       chan->ident = 0;
+
+       /* Include all channels pending with the same ident */
+       if (!rsp->pdu.rsp.result)
+               rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
+       else
+               l2cap_chan_del(chan, ECONNRESET);
+}
+
+void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
+{
        struct l2cap_conn *conn = chan->conn;
-       u16 ident = chan->ident;
-       int i = 0;
+       struct l2cap_ecred_rsp_data data;
+       u16 id = chan->ident;
+       int result = 0;
 
-       if (!ident)
+       if (!id)
                return;
 
-       BT_DBG("chan %p ident %d", chan, ident);
+       BT_DBG("chan %p id %d", chan, id);
 
-       pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
-       pdu.rsp.mps     = cpu_to_le16(chan->mps);
-       pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
-       pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
+       memset(&data, 0, sizeof(data));
 
-       mutex_lock(&conn->chan_lock);
+       data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
+       data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
+       data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+       data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
 
-       list_for_each_entry(chan, &conn->chan_l, list) {
-               if (chan->ident != ident)
-                       continue;
+       /* Verify that all channels are ready */
+       __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
 
-               /* Reset ident so only one response is sent */
-               chan->ident = 0;
+       if (result > 0)
+               return;
 
-               /* Include all channels pending with the same ident */
-               pdu.dcid[i++] = cpu_to_le16(chan->scid);
-       }
+       if (result < 0)
+               data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
 
-       mutex_unlock(&conn->chan_lock);
+       /* Build response */
+       __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
 
-       l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
-                       sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
+       l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
+                      sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
+                      &data.pdu);
 }
 
 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
@@ -6078,6 +6118,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
                __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
 
                chan->ident = cmd->ident;
+               chan->mode = L2CAP_MODE_EXT_FLOWCTL;
 
                if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
                        l2cap_state_change(chan, BT_CONNECT2);
index 7add66f30e4d1a858a75dfba41260c922eacb444..249dc6777fb4e92963d150a1ec135299fb7faa29 100644 (file)
@@ -4639,12 +4639,6 @@ static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
                                       MGMT_OP_SET_EXP_FEATURE,
                                       MGMT_STATUS_INVALID_INDEX);
 
-       /* Changes can only be made when controller is powered down */
-       if (hdev_is_powered(hdev))
-               return mgmt_cmd_status(sk, hdev->id,
-                                      MGMT_OP_SET_EXP_FEATURE,
-                                      MGMT_STATUS_REJECTED);
-
        /* Parameters are limited to a single octet */
        if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
                return mgmt_cmd_status(sk, hdev->id,
@@ -9363,7 +9357,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
        { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { add_adv_patterns_monitor_rssi,
-                                  MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
+                                  MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
+                                               HCI_MGMT_VAR_LEN },
        { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
index 9e10802587fc3c3057eda5c1786ac3f6fef7b9bb..3abab70d66ddc8a3342dfe6c3f94f2eff9bb3a79 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN kernel source */
index 2c5fc7d1e8a741e3ba2e7c3332ec4f436225c6d4..74d74fc23167988996d56107771e90e9a3cc7e0e 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN kernel header */
index 8c92fc55331771495b7c11fe5b4e660d8c11a11c..528d4b37983df8c61bd3f6f50b9f4ccbe8cc7132 100644 (file)
@@ -720,7 +720,10 @@ __diag_ignore_all("-Wmissing-prototypes",
  * @ctx: XDP context pointer.
  * @timestamp: Return value pointer.
  *
- * Returns 0 on success or ``-errno`` on error.
+ * Return:
+ * * Returns 0 on success or ``-errno`` on error.
+ * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
+ * * ``-ENODATA``    : means no RX-timestamp available for this frame
  */
 __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
 {
@@ -732,7 +735,10 @@ __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *tim
  * @ctx: XDP context pointer.
  * @hash: Return value pointer.
  *
- * Returns 0 on success or ``-errno`` on error.
+ * Return:
+ * * Returns 0 on success or ``-errno`` on error.
+ * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
+ * * ``-ENODATA``    : means no RX-hash available for this frame
  */
 __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
 {
@@ -774,20 +780,34 @@ static int __init xdp_metadata_init(void)
 }
 late_initcall(xdp_metadata_init);
 
+void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
+{
+       val &= NETDEV_XDP_ACT_MASK;
+       if (dev->xdp_features == val)
+               return;
+
+       dev->xdp_features = val;
+
+       if (dev->reg_state == NETREG_REGISTERED)
+               call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL_GPL(xdp_set_features_flag);
+
 void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
 {
-       dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
-       if (support_sg)
-               dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT_SG;
+       xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);
 
-       call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+       if (support_sg)
+               val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
+       xdp_set_features_flag(dev, val);
 }
 EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);
 
 void xdp_features_clear_redirect_target(struct net_device *dev)
 {
-       dev->xdp_features &= ~(NETDEV_XDP_ACT_NDO_XMIT |
-                              NETDEV_XDP_ACT_NDO_XMIT_SG);
-       call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
+       xdp_features_t val = dev->xdp_features;
+
+       val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
+       xdp_set_features_flag(dev, val);
 }
 EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
index 6957971c2db23a0cd68cdf74830615d0bc7760a4..cac17183589fee673cbf66509dbba6f6753e069d 100644 (file)
@@ -1933,6 +1933,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
        int new_master_mtu;
        int old_master_mtu;
        int mtu_limit;
+       int overhead;
        int cpu_mtu;
        int err;
 
@@ -1961,9 +1962,10 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
                        largest_mtu = slave_mtu;
        }
 
-       mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
+       overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
+       mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
        old_master_mtu = master->mtu;
-       new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
+       new_master_mtu = largest_mtu + overhead;
        if (new_master_mtu > mtu_limit)
                return -ERANGE;
 
@@ -1998,8 +2000,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
 
 out_port_failed:
        if (new_master_mtu != old_master_mtu)
-               dsa_port_mtu_change(cpu_dp, old_master_mtu -
-                                   dsa_tag_protocol_overhead(cpu_dp->tag_ops));
+               dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
 out_cpu_failed:
        if (new_master_mtu != old_master_mtu)
                dev_set_mtu(master, old_master_mtu);
index b2fba1a003ce369a275b7fdbb08952d7165804a5..5105a5ff58fa20156099977a64f81edc98f9c710 100644 (file)
@@ -114,7 +114,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
                skb = nskb;
        }
 
-       dev_sw_netstats_rx_add(skb->dev, skb->len);
+       dev_sw_netstats_rx_add(skb->dev, skb->len + ETH_HLEN);
 
        if (dsa_skb_defer_rx_timestamp(p, skb))
                return 0;
index 10239daa57454360b7f867a3bc9771f204f1bbe6..cacdafb41200e5f941dff2e259a57720fb916f43 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/dsa/brcm.h>
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 
@@ -252,6 +253,7 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
 static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
                                        struct net_device *dev)
 {
+       int len = BRCM_LEG_TAG_LEN;
        int source_port;
        u8 *brcm_tag;
 
@@ -266,12 +268,16 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
        if (!skb->dev)
                return NULL;
 
+       /* VLAN tag is added by BCM63xx internal switch */
+       if (netdev_uses_dsa(skb->dev))
+               len += VLAN_HLEN;
+
        /* Remove Broadcom tag and update checksum */
-       skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN);
+       skb_pull_rcsum(skb, len);
 
        dsa_default_offload_fwd_mark(skb);
 
-       dsa_strip_etype_header(skb, BRCM_LEG_TAG_LEN);
+       dsa_strip_etype_header(skb, len);
 
        return skb;
 }
index 00db74d96583d0f7b33b29697037b979f052c1d5..b77f1189d19d1ff5dc76f4a7345efc7a65918399 100644 (file)
@@ -415,7 +415,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
        node_dst = find_node_by_addr_A(&port->hsr->node_db,
                                       eth_hdr(skb)->h_dest);
        if (!node_dst) {
-               if (net_ratelimit())
+               if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
                        netdev_err(skb->dev, "%s: Unknown node\n", __func__);
                return;
        }
index b5736ef16ed2d54d1b15be79de35766d36bd12b3..390f4be7f7bec20f33aa80e9bf12d5e2f3760562 100644 (file)
@@ -576,6 +576,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                        cfg->fc_scope = RT_SCOPE_UNIVERSE;
        }
 
+       if (!cfg->fc_table)
+               cfg->fc_table = RT_TABLE_MAIN;
+
        if (cmd == SIOCDELRT)
                return 0;
 
index 5c14fe030eda555ebfc25e2e073b550ad40495ef..6c37c4f98ccab403e29740f41003b3696df7250f 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/fou.yaml */
 /* YNL-GEN kernel source */
index 58b1e1ed4b3bb3cb6855c74b4c87f8d5876b7b07..dbd0780a5d34c3963681c1d7f0aaf579fc186e5e 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/fou.yaml */
 /* YNL-GEN kernel header */
index e41fdc38ce196fa3ba7c8ed5e6a0aa20c7e30362..6edae3886885565985f9f45bae65ef2f4498f22f 100644 (file)
@@ -828,8 +828,14 @@ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const
 #if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr addr_any = {};
 
-       if (sk->sk_family != tb->family)
+       if (sk->sk_family != tb->family) {
+               if (sk->sk_family == AF_INET)
+                       return net_eq(ib2_net(tb), net) && tb->port == port &&
+                               tb->l3mdev == l3mdev &&
+                               ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
+
                return false;
+       }
 
        if (sk->sk_family == AF_INET6)
                return net_eq(ib2_net(tb), net) && tb->port == port &&
index ffff46cdcb58f9b5b4c210a548d685d8f0c7716a..e55a202649608f6f3c814e143581d631ce0467e6 100644 (file)
@@ -552,7 +552,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
                truncate = true;
        }
 
-       nhoff = skb_network_header(skb) - skb_mac_header(skb);
+       nhoff = skb_network_offset(skb);
        if (skb->protocol == htons(ETH_P_IP) &&
            (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
                truncate = true;
@@ -561,7 +561,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
                int thoff;
 
                if (skb_transport_header_was_set(skb))
-                       thoff = skb_transport_header(skb) - skb_mac_header(skb);
+                       thoff = skb_transport_offset(skb);
                else
                        thoff = nhoff + sizeof(struct ipv6hdr);
                if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
index de90b09dfe78fc4510985dd436c017d3c46f054c..2541083d49ad66fcc6c4485abb4bd96c50dc0989 100644 (file)
@@ -614,10 +614,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
-       if (headroom > dev->needed_headroom)
-               dev->needed_headroom = headroom;
+       if (headroom > READ_ONCE(dev->needed_headroom))
+               WRITE_ONCE(dev->needed_headroom, headroom);
 
-       if (skb_cow_head(skb, dev->needed_headroom)) {
+       if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
                ip_rt_put(rt);
                goto tx_dropped;
        }
@@ -800,10 +800,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
-       if (max_headroom > dev->needed_headroom)
-               dev->needed_headroom = max_headroom;
+       if (max_headroom > READ_ONCE(dev->needed_headroom))
+               WRITE_ONCE(dev->needed_headroom, max_headroom);
 
-       if (skb_cow_head(skb, dev->needed_headroom)) {
+       if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
                ip_rt_put(rt);
                DEV_STATS_INC(dev, tx_dropped);
                kfree_skb(skb);
index 71d01cf3c13eb4bd3d314ef140568d2ffd6a499e..ba839e441450f195012a8d77cb9e5ed956962d2f 100644 (file)
@@ -3605,7 +3605,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        th->window = htons(min(req->rsk_rcv_wnd, 65535U));
        tcp_options_write(th, NULL, &opts);
        th->doff = (tcp_header_size >> 2);
-       __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Okay, we have all we need - do the md5 hash if needed */
index 89f5f0f3f5d65b6aac0dae2302d8a5607a492155..a4ecfc9d25930967a6af7bd9de9aa52bfd08730d 100644 (file)
@@ -959,7 +959,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                truncate = true;
        }
 
-       nhoff = skb_network_header(skb) - skb_mac_header(skb);
+       nhoff = skb_network_offset(skb);
        if (skb->protocol == htons(ETH_P_IP) &&
            (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
                truncate = true;
@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                int thoff;
 
                if (skb_transport_header_was_set(skb))
-                       thoff = skb_transport_header(skb) - skb_mac_header(skb);
+                       thoff = skb_transport_offset(skb);
                else
                        thoff = nhoff + sizeof(struct ipv6hdr);
                if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
index 47b6607a13706ef415e0b19f38014700e673da84..5e80e517f071013410349d1fd93afc00a394e284 100644 (file)
@@ -1240,8 +1240,8 @@ route_lookup:
         */
        max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
                        + dst->header_len + t->hlen;
-       if (max_headroom > dev->needed_headroom)
-               dev->needed_headroom = max_headroom;
+       if (max_headroom > READ_ONCE(dev->needed_headroom))
+               WRITE_ONCE(dev->needed_headroom, max_headroom);
 
        err = ip6_tnl_encap(skb, t, &proto, fl6);
        if (err)
index eb0295d90039577acc88da033d45ac13692c9cfd..fc3fddeb6f36d4066c91d4eae889deccb07799fc 100644 (file)
@@ -83,7 +83,7 @@ struct iucv_irq_data {
        u16 ippathid;
        u8  ipflags1;
        u8  iptype;
-       u32 res2[8];
+       u32 res2[9];
 };
 
 struct iucv_irq_list {
index 8eb3423008687a3f09c3512d8adbb00b19579695..d3d861911ed65084bd617736c5c32c3ed899de50 100644 (file)
@@ -2611,6 +2611,17 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
        if (!sband)
                return -EINVAL;
 
+       if (params->basic_rates) {
+               if (!ieee80211_parse_bitrates(link->conf->chandef.width,
+                                             wiphy->bands[sband->band],
+                                             params->basic_rates,
+                                             params->basic_rates_len,
+                                             &link->conf->basic_rates))
+                       return -EINVAL;
+               changed |= BSS_CHANGED_BASIC_RATES;
+               ieee80211_check_rate_mask(link);
+       }
+
        if (params->use_cts_prot >= 0) {
                link->conf->use_cts_prot = params->use_cts_prot;
                changed |= BSS_CHANGED_ERP_CTS_PROT;
@@ -2632,16 +2643,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
                changed |= BSS_CHANGED_ERP_SLOT;
        }
 
-       if (params->basic_rates) {
-               ieee80211_parse_bitrates(link->conf->chandef.width,
-                                        wiphy->bands[sband->band],
-                                        params->basic_rates,
-                                        params->basic_rates_len,
-                                        &link->conf->basic_rates);
-               changed |= BSS_CHANGED_BASIC_RATES;
-               ieee80211_check_rate_mask(link);
-       }
-
        if (params->ap_isolate >= 0) {
                if (params->ap_isolate)
                        sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
index ecc232eb1ee82f9f3a143a65b5b258a2dbd32773..e082582e0aa284b4af3792c4c6dc7c176f61df68 100644 (file)
@@ -1284,6 +1284,9 @@ struct ieee80211_local {
        struct list_head active_txqs[IEEE80211_NUM_ACS];
        u16 schedule_round[IEEE80211_NUM_ACS];
 
+       /* serializes ieee80211_handle_wake_tx_queue */
+       spinlock_t handle_wake_tx_queue_lock;
+
        u16 airtime_flags;
        u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
        u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
index 846528850612a7d9886be8d5ca5147b24fea61e1..ddf2b7811c557973846a5926ef5b521295740e31 100644 (file)
@@ -802,6 +802,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        local->aql_threshold = IEEE80211_AQL_THRESHOLD;
        atomic_set(&local->aql_total_pending_airtime, 0);
 
+       spin_lock_init(&local->handle_wake_tx_queue_lock);
+
        INIT_LIST_HEAD(&local->chanctx_list);
        mutex_init(&local->chanctx_mtx);
 
index f7fdfe710951faa1c9402cfd8d71ced290864f3e..e8de500eb9f3c74af51b82b60fc15f8d3f464d34 100644 (file)
@@ -2765,17 +2765,6 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
            mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
                return RX_DROP_MONITOR;
 
-       /* Frame has reached destination.  Don't forward */
-       if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
-               goto rx_accept;
-
-       if (!ifmsh->mshcfg.dot11MeshForwarding) {
-               if (is_multicast_ether_addr(eth->h_dest))
-                       goto rx_accept;
-
-               return RX_DROP_MONITOR;
-       }
-
        /* forward packet */
        if (sdata->crypto_tx_tailroom_needed_cnt)
                tailroom = IEEE80211_ENCRYPT_TAILROOM;
@@ -2814,6 +2803,17 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
                rcu_read_unlock();
        }
 
+       /* Frame has reached destination.  Don't forward */
+       if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
+               goto rx_accept;
+
+       if (!ifmsh->mshcfg.dot11MeshForwarding) {
+               if (is_multicast_ether_addr(eth->h_dest))
+                       goto rx_accept;
+
+               return RX_DROP_MONITOR;
+       }
+
        skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);
 
        ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control,
index 1a28fe5cb614f2a5fe6f9007d1769fa7d6446a18..3aceb3b731bf4f9e401fabe5fc5af04c056bfb1b 100644 (file)
@@ -314,6 +314,8 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
        struct ieee80211_txq *queue;
 
+       spin_lock(&local->handle_wake_tx_queue_lock);
+
        /* Use ieee80211_next_txq() for airtime fairness accounting */
        ieee80211_txq_schedule_start(hw, txq->ac);
        while ((queue = ieee80211_next_txq(hw, txq->ac))) {
@@ -321,6 +323,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
                ieee80211_return_txq(hw, queue, false);
        }
        ieee80211_txq_schedule_end(hw, txq->ac);
+       spin_unlock(&local->handle_wake_tx_queue_lock);
 }
 EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
 
index a12c63638680122f7f449fba4060066c78a07c14..1601be5764145d3c736c59699ccc3873a726750f 100644 (file)
@@ -147,6 +147,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
                           struct sta_info *sta, struct sk_buff *skb)
 {
+       const struct ethhdr *eth = (void *)skb->data;
        struct mac80211_qos_map *qos_map;
        bool qos;
 
@@ -154,8 +155,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
        skb_get_hash(skb);
 
        /* all mesh/ocb stations are required to support WME */
-       if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-                   sdata->vif.type == NL80211_IFTYPE_OCB))
+       if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
+           !is_multicast_ether_addr(eth->h_dest)) ||
+           (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
                qos = true;
        else if (sta)
                qos = sta->sta.wme;
index 56628b52d1001a967eb2e504bdbeac0c4cd17acc..5c8dea49626c31a008f9243498564fbcd3cebb1c 100644 (file)
@@ -997,9 +997,13 @@ out:
        return ret;
 }
 
+static struct lock_class_key mptcp_slock_keys[2];
+static struct lock_class_key mptcp_keys[2];
+
 static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
                                            struct mptcp_pm_addr_entry *entry)
 {
+       bool is_ipv6 = sk->sk_family == AF_INET6;
        int addrlen = sizeof(struct sockaddr_in);
        struct sockaddr_storage addr;
        struct socket *ssock;
@@ -1016,6 +1020,18 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
        if (!newsk)
                return -EINVAL;
 
+       /* The subflow socket lock is acquired in a nested to the msk one
+        * in several places, even by the TCP stack, and this msk is a kernel
+        * socket: lockdep complains. Instead of propagating the _nested
+        * modifiers in several places, re-init the lock class for the msk
+        * socket to an mptcp specific one.
+        */
+       sock_lock_init_class_and_name(newsk,
+                                     is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
+                                     &mptcp_slock_keys[is_ipv6],
+                                     is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
+                                     &mptcp_keys[is_ipv6]);
+
        lock_sock(newsk);
        ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
        release_sock(newsk);
index 3ad9c46202fc63a5b3a870bf2ba994a8d9148264..60b23b2716c4083349f3f68655d243398bc31776 100644 (file)
@@ -825,7 +825,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
        if (sk->sk_socket && !ssk->sk_socket)
                mptcp_sock_graft(ssk, sk->sk_socket);
 
-       mptcp_propagate_sndbuf((struct sock *)msk, ssk);
        mptcp_sockopt_sync_locked(msk, ssk);
        return true;
 }
@@ -2343,7 +2342,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                goto out;
        }
 
-       sock_orphan(ssk);
        subflow->disposable = 1;
 
        /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
@@ -2351,15 +2349,25 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
         * reference owned by msk;
         */
        if (!inet_csk(ssk)->icsk_ulp_ops) {
+               WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
                kfree_rcu(subflow, rcu);
+       } else if (msk->in_accept_queue && msk->first == ssk) {
+               /* if the first subflow moved to a close state, e.g. due to
+                * incoming reset and we reach here before inet_child_forget()
+                * the TCP stack could later try to close it via
+                * inet_csk_listen_stop(), or deliver it to the user space via
+                * accept().
+                * We can't delete the subflow - or risk a double free - nor let
+                * the msk survive - or will be leaked in the non accept scenario:
+                * fallback and let TCP cope with the subflow cleanup.
+                */
+               WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
+               mptcp_subflow_drop_ctx(ssk);
        } else {
                /* otherwise tcp will dispose of the ssk and subflow ctx */
-               if (ssk->sk_state == TCP_LISTEN) {
-                       tcp_set_state(ssk, TCP_CLOSE);
-                       mptcp_subflow_queue_clean(sk, ssk);
-                       inet_csk_listen_stop(ssk);
+               if (ssk->sk_state == TCP_LISTEN)
                        mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
-               }
+
                __tcp_close(ssk, 0);
 
                /* close acquired an extra ref */
@@ -2399,9 +2407,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
        return 0;
 }
 
-static void __mptcp_close_subflow(struct mptcp_sock *msk)
+static void __mptcp_close_subflow(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow, *tmp;
+       struct mptcp_sock *msk = mptcp_sk(sk);
 
        might_sleep();
 
@@ -2415,7 +2424,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
                if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
                        continue;
 
-               mptcp_close_ssk((struct sock *)msk, ssk, subflow);
+               mptcp_close_ssk(sk, ssk, subflow);
+       }
+
+       /* if the MPC subflow has been closed before the msk is accepted,
+        * msk will never be accept-ed, close it now
+        */
+       if (!msk->first && msk->in_accept_queue) {
+               sock_set_flag(sk, SOCK_DEAD);
+               inet_sk_state_store(sk, TCP_CLOSE);
        }
 }
 
@@ -2624,6 +2641,9 @@ static void mptcp_worker(struct work_struct *work)
        __mptcp_check_send_data_fin(sk);
        mptcp_check_data_fin(sk);
 
+       if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+               __mptcp_close_subflow(sk);
+
        /* There is no point in keeping around an orphaned sk timedout or
         * closed, but we need the msk around to reply to incoming DATA_FIN,
         * even if it is orphaned and in FIN_WAIT2 state
@@ -2639,9 +2659,6 @@ static void mptcp_worker(struct work_struct *work)
                }
        }
 
-       if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
-               __mptcp_close_subflow(msk);
-
        if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
                __mptcp_retrans(sk);
 
@@ -3079,6 +3096,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        msk->local_key = subflow_req->local_key;
        msk->token = subflow_req->token;
        msk->subflow = NULL;
+       msk->in_accept_queue = 1;
        WRITE_ONCE(msk->fully_established, false);
        if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
                WRITE_ONCE(msk->csum_enabled, true);
@@ -3096,8 +3114,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        security_inet_csk_clone(nsk, req);
        bh_unlock_sock(nsk);
 
-       /* keep a single reference */
-       __sock_put(nsk);
+       /* note: the newly allocated socket refcount is 2 now */
        return nsk;
 }
 
@@ -3153,8 +3170,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                        goto out;
                }
 
-               /* acquire the 2nd reference for the owning socket */
-               sock_hold(new_mptcp_sock);
                newsk = new_mptcp_sock;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
        } else {
@@ -3705,25 +3720,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
                struct sock *newsk = newsock->sk;
 
                set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
+               msk->in_accept_queue = 0;
 
                lock_sock(newsk);
 
-               /* PM/worker can now acquire the first subflow socket
-                * lock without racing with listener queue cleanup,
-                * we can notify it, if needed.
-                *
-                * Even if remote has reset the initial subflow by now
-                * the refcnt is still at least one.
-                */
-               subflow = mptcp_subflow_ctx(msk->first);
-               list_add(&subflow->node, &msk->conn_list);
-               sock_hold(msk->first);
-               if (mptcp_is_fully_established(newsk))
-                       mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
-
-               mptcp_rcv_space_init(msk, msk->first);
-               mptcp_propagate_sndbuf(newsk, msk->first);
-
                /* set ssk->sk_socket of accept()ed flows to mptcp socket.
                 * This is needed so NOSPACE flag can be set from tcp stack.
                 */
index 61fd8eabfca2028680e04558b4baca9f48bbaaaa..339a6f0729898422cfd7e7ee8c014fd09fecbeeb 100644 (file)
@@ -295,7 +295,8 @@ struct mptcp_sock {
        u8              recvmsg_inq:1,
                        cork:1,
                        nodelay:1,
-                       fastopening:1;
+                       fastopening:1,
+                       in_accept_queue:1;
        int             connect_flags;
        struct work_struct work;
        struct sk_buff  *ooo_last_skb;
@@ -628,7 +629,6 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                     struct mptcp_subflow_context *subflow);
 void __mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_reset(struct sock *ssk);
-void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
 bool __mptcp_close(struct sock *sk, long timeout);
@@ -666,6 +666,8 @@ void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
 
 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
 
+void mptcp_subflow_drop_ctx(struct sock *ssk);
+
 static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
                                              struct mptcp_subflow_context *ctx)
 {
index 4ae1a7304cf0da1840a1d236969549d18cf8ff97..a0041360ee9d95b0cf85845e98c0f157a578e59d 100644 (file)
@@ -397,10 +397,15 @@ void mptcp_subflow_reset(struct sock *ssk)
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;
 
+       /* mptcp_mp_fail_no_response() can reach here on an already closed
+        * socket
+        */
+       if (ssk->sk_state == TCP_CLOSE)
+               return;
+
        /* must hold: tcp_done() could drop last reference on parent */
        sock_hold(sk);
 
-       tcp_set_state(ssk, TCP_CLOSE);
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        tcp_done(ssk);
        if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
@@ -622,7 +627,7 @@ static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init
 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
-static struct proto tcpv6_prot_override;
+static struct proto tcpv6_prot_override __ro_after_init;
 
 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
@@ -693,9 +698,10 @@ static bool subflow_hmac_valid(const struct request_sock *req,
 
 static void mptcp_force_close(struct sock *sk)
 {
-       /* the msk is not yet exposed to user-space */
+       /* the msk is not yet exposed to user-space, and refcount is 2 */
        inet_sk_state_store(sk, TCP_CLOSE);
        sk_common_release(sk);
+       sock_put(sk);
 }
 
 static void subflow_ulp_fallback(struct sock *sk,
@@ -711,7 +717,7 @@ static void subflow_ulp_fallback(struct sock *sk,
        mptcp_subflow_ops_undo_override(sk);
 }
 
-static void subflow_drop_ctx(struct sock *ssk)
+void mptcp_subflow_drop_ctx(struct sock *ssk)
 {
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
 
@@ -750,6 +756,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
        struct mptcp_options_received mp_opt;
        bool fallback, fallback_is_fatal;
        struct sock *new_msk = NULL;
+       struct mptcp_sock *owner;
        struct sock *child;
 
        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
@@ -816,7 +823,7 @@ create_child:
 
                        if (new_msk)
                                mptcp_copy_inaddrs(new_msk, child);
-                       subflow_drop_ctx(child);
+                       mptcp_subflow_drop_ctx(child);
                        goto out;
                }
 
@@ -824,6 +831,8 @@ create_child:
                ctx->setsockopt_seq = listener->setsockopt_seq;
 
                if (ctx->mp_capable) {
+                       owner = mptcp_sk(new_msk);
+
                        /* this can't race with mptcp_close(), as the msk is
                         * not yet exposted to user-space
                         */
@@ -832,14 +841,14 @@ create_child:
                        /* record the newly created socket as the first msk
                         * subflow, but don't link it yet into conn_list
                         */
-                       WRITE_ONCE(mptcp_sk(new_msk)->first, child);
+                       WRITE_ONCE(owner->first, child);
 
                        /* new mpc subflow takes ownership of the newly
                         * created mptcp socket
                         */
                        mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
-                       mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
-                       mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
+                       mptcp_pm_new_connection(owner, child, 1);
+                       mptcp_token_accept(subflow_req, owner);
                        ctx->conn = new_msk;
                        new_msk = NULL;
 
@@ -847,15 +856,21 @@ create_child:
                         * uses the correct data
                         */
                        mptcp_copy_inaddrs(ctx->conn, child);
+                       mptcp_propagate_sndbuf(ctx->conn, child);
+
+                       mptcp_rcv_space_init(owner, child);
+                       list_add(&ctx->node, &owner->conn_list);
+                       sock_hold(child);
 
                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
-                       if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
+                       if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
                                mptcp_subflow_fully_established(ctx, &mp_opt);
+                               mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
+                               ctx->pm_notified = 1;
+                       }
                } else if (ctx->mp_join) {
-                       struct mptcp_sock *owner;
-
                        owner = subflow_req->msk;
                        if (!owner) {
                                subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
@@ -899,7 +914,7 @@ out:
        return child;
 
 dispose_child:
-       subflow_drop_ctx(child);
+       mptcp_subflow_drop_ctx(child);
        tcp_rsk(req)->drop_req = true;
        inet_csk_prepare_for_destroy_sock(child);
        tcp_done(child);
@@ -910,7 +925,7 @@ dispose_child:
 }
 
 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
-static struct proto tcp_prot_override;
+static struct proto tcp_prot_override __ro_after_init;
 
 enum mapping_status {
        MAPPING_OK,
@@ -1432,6 +1447,13 @@ static void subflow_error_report(struct sock *ssk)
 {
        struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
 
+       /* bail early if this is a no-op, so that we avoid introducing a
+        * problematic lockdep dependency between TCP accept queue lock
+        * and msk socket spinlock
+        */
+       if (!sk->sk_socket)
+               return;
+
        mptcp_data_lock(sk);
        if (!sock_owned_by_user(sk))
                __mptcp_error_report(sk);
@@ -1803,79 +1825,6 @@ static void subflow_state_change(struct sock *sk)
        }
 }
 
-void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
-{
-       struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
-       struct mptcp_sock *msk, *next, *head = NULL;
-       struct request_sock *req;
-
-       /* build a list of all unaccepted mptcp sockets */
-       spin_lock_bh(&queue->rskq_lock);
-       for (req = queue->rskq_accept_head; req; req = req->dl_next) {
-               struct mptcp_subflow_context *subflow;
-               struct sock *ssk = req->sk;
-               struct mptcp_sock *msk;
-
-               if (!sk_is_mptcp(ssk))
-                       continue;
-
-               subflow = mptcp_subflow_ctx(ssk);
-               if (!subflow || !subflow->conn)
-                       continue;
-
-               /* skip if already in list */
-               msk = mptcp_sk(subflow->conn);
-               if (msk->dl_next || msk == head)
-                       continue;
-
-               msk->dl_next = head;
-               head = msk;
-       }
-       spin_unlock_bh(&queue->rskq_lock);
-       if (!head)
-               return;
-
-       /* can't acquire the msk socket lock under the subflow one,
-        * or will cause ABBA deadlock
-        */
-       release_sock(listener_ssk);
-
-       for (msk = head; msk; msk = next) {
-               struct sock *sk = (struct sock *)msk;
-               bool do_cancel_work;
-
-               sock_hold(sk);
-               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-               next = msk->dl_next;
-               msk->first = NULL;
-               msk->dl_next = NULL;
-
-               do_cancel_work = __mptcp_close(sk, 0);
-               release_sock(sk);
-               if (do_cancel_work) {
-                       /* lockdep will report a false positive ABBA deadlock
-                        * between cancel_work_sync and the listener socket.
-                        * The involved locks belong to different sockets WRT
-                        * the existing AB chain.
-                        * Using a per socket key is problematic as key
-                        * deregistration requires process context and must be
-                        * performed at socket disposal time, in atomic
-                        * context.
-                        * Just tell lockdep to consider the listener socket
-                        * released here.
-                        */
-                       mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
-                       mptcp_cancel_work(sk);
-                       mutex_acquire(&listener_sk->sk_lock.dep_map,
-                                     SINGLE_DEPTH_NESTING, 0, _RET_IP_);
-               }
-               sock_put(sk);
-       }
-
-       /* we are still under the listener msk socket lock */
-       lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
-}
-
 static int subflow_ulp_init(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1932,6 +1881,13 @@ static void subflow_ulp_release(struct sock *ssk)
                 * when the subflow is still unaccepted
                 */
                release = ctx->disposable || list_empty(&ctx->node);
+
+               /* inet_child_forget() does not call sk_state_change(),
+                * explicitly trigger the socket close machinery
+                */
+               if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
+                                                 &mptcp_sk(sk)->flags))
+                       mptcp_schedule_work(sk);
                sock_put(sk);
        }
 
index 80713febfac6dd1911f83d05eb649888dcb93514..d9da942ad53dd98da908d2b80d98a4827eee4954 100644 (file)
@@ -1803,8 +1803,8 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
        pdev = to_platform_device(dev->dev.parent);
        if (pdev) {
                np = pdev->dev.of_node;
-               if (np && (of_get_property(np, "mellanox,multi-host", NULL) ||
-                          of_get_property(np, "mlx,multi-host", NULL)))
+               if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
+                          of_property_read_bool(np, "mlx,multi-host")))
                        ndp->mlx_multi_host = true;
        }
 
index e55e455275c48ea8875d09e45c474527464c366c..9544c2f16998bf75420576d9a1fad8313297f4d0 100644 (file)
@@ -43,7 +43,7 @@ static int nft_masq_init(const struct nft_ctx *ctx,
                         const struct nft_expr *expr,
                         const struct nlattr * const tb[])
 {
-       u32 plen = sizeof_field(struct nf_nat_range, min_addr.all);
+       u32 plen = sizeof_field(struct nf_nat_range, min_proto.all);
        struct nft_masq *priv = nft_expr_priv(expr);
        int err;
 
index 0479991503900ec24b8ce7391481b46713c1de9b..5c29915ab0289ee615a47a0d336cead0b377cef2 100644 (file)
@@ -226,7 +226,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                priv->flags |= NF_NAT_RANGE_MAP_IPS;
        }
 
-       plen = sizeof_field(struct nf_nat_range, min_addr.all);
+       plen = sizeof_field(struct nf_nat_range, min_proto.all);
        if (tb[NFTA_NAT_REG_PROTO_MIN]) {
                err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
                                              &priv->sreg_proto_min, plen);
index 5f77399875593ba6dd88d6ea54fdae904eb6cd69..67cec56bc84a35ca111853b58f799953b56eef6c 100644 (file)
@@ -48,7 +48,7 @@ static int nft_redir_init(const struct nft_ctx *ctx,
        unsigned int plen;
        int err;
 
-       plen = sizeof_field(struct nf_nat_range, min_addr.all);
+       plen = sizeof_field(struct nf_nat_range, min_proto.all);
        if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
                err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
                                              &priv->sreg_proto_min, plen);
@@ -236,7 +236,7 @@ static struct nft_expr_type nft_redir_inet_type __read_mostly = {
        .name           = "redir",
        .ops            = &nft_redir_inet_ops,
        .policy         = nft_redir_policy,
-       .maxattr        = NFTA_MASQ_MAX,
+       .maxattr        = NFTA_REDIR_MAX,
        .owner          = THIS_MODULE,
 };
 
index 34c50867504160d02d413ea211c075525ad6bf26..296fc1afedd82bdaed2e08aa12a03180a1ff7184 100644 (file)
@@ -1589,6 +1589,10 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;
 
+       if (extack && extack->_msg &&
+           nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
+               goto out_nlmsg_trim;
+
        nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
        if (!nest)
                goto out_nlmsg_trim;
@@ -1596,10 +1600,6 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
        if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
                goto out_nlmsg_trim;
 
-       if (extack && extack->_msg &&
-           nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
-               goto out_nlmsg_trim;
-
        nla_nest_end(skb, nest);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
index ff6dd86bdc9f3f504426f0d3f91424f14d77b689..c6b4a62276f6d8e16d58c6156d6f042c54fed355 100644 (file)
@@ -3501,6 +3501,7 @@ out_pnet:
 out_nl:
        smc_nl_exit();
 out_ism:
+       smc_clc_exit();
        smc_ism_exit();
 out_pernet_subsys_stat:
        unregister_pernet_subsys(&smc_net_stat_ops);
index 53f63bfbaf5f92915bb7af4e02fe8fe2e96b89f2..89105e95b4523f0a0d197e7167f27914430f482e 100644 (file)
@@ -114,6 +114,9 @@ int smc_cdc_msg_send(struct smc_connection *conn,
        union smc_host_cursor cfed;
        int rc;
 
+       if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+               return -ENOBUFS;
+
        smc_cdc_add_pending_send(conn, pend);
 
        conn->tx_cdc_seq++;
index d52060b2680cf0090cb91f454a27f22b10ff2e02..454356771cda55b10c496cbebc470c273d4c56ea 100644 (file)
@@ -1464,7 +1464,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
        if (lgr->terminating)
                return; /* lgr already terminating */
        /* cancel free_work sync, will terminate when lgr->freeing is set */
-       cancel_delayed_work_sync(&lgr->free_work);
+       cancel_delayed_work(&lgr->free_work);
        lgr->terminating = 1;
 
        /* kill remaining link group connections */
index 6c7c52eeed4f85773c71fea22879d04b14a9f665..212c5d57465a1bf56afc96af1e91d135ca6e1325 100644 (file)
@@ -353,7 +353,9 @@ gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
        err = crypto_ahash_final(req);
        if (err)
                goto out_free_ahash;
-       memcpy(cksumout->data, checksumdata, cksumout->len);
+
+       memcpy(cksumout->data, checksumdata,
+              min_t(int, cksumout->len, crypto_ahash_digestsize(tfm)));
 
 out_free_ahash:
        ahash_request_free(req);
@@ -809,8 +811,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
        buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
        buf->len += GSS_KRB5_TOK_HDR_LEN;
 
-       /* Do the HMAC */
-       hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
+       hmac.len = kctx->gk5e->cksumlength;
        hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
 
        /*
@@ -873,8 +874,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
        if (ret)
                goto out_err;
 
-       /* Calculate our hmac over the plaintext data */
-       our_hmac_obj.len = sizeof(our_hmac);
+       our_hmac_obj.len = kctx->gk5e->cksumlength;
        our_hmac_obj.data = our_hmac;
        ret = gss_krb5_checksum(ahash, NULL, 0, &subbuf, 0, &our_hmac_obj);
        if (ret)
index a1581c77cf84a81bb5503d57039e49b5b970ce85..6564192e7f20170f2623f6eca5180b54bfc2ab64 100644 (file)
@@ -241,21 +241,18 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
 }
 
 static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
-                                       struct sk_buff *skb)
+                                       u32 len)
 {
-       if (vvs->rx_bytes + skb->len > vvs->buf_alloc)
+       if (vvs->rx_bytes + len > vvs->buf_alloc)
                return false;
 
-       vvs->rx_bytes += skb->len;
+       vvs->rx_bytes += len;
        return true;
 }
 
 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
-                                       struct sk_buff *skb)
+                                       u32 len)
 {
-       int len;
-
-       len = skb_headroom(skb) - sizeof(struct virtio_vsock_hdr) - skb->len;
        vvs->rx_bytes -= len;
        vvs->fwd_cnt += len;
 }
@@ -367,7 +364,7 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
 
        spin_lock_bh(&vvs->rx_lock);
        while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
-               skb = __skb_dequeue(&vvs->rx_queue);
+               skb = skb_peek(&vvs->rx_queue);
 
                bytes = len - total;
                if (bytes > skb->len)
@@ -388,10 +385,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
                skb_pull(skb, bytes);
 
                if (skb->len == 0) {
-                       virtio_transport_dec_rx_pkt(vvs, skb);
+                       u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
+
+                       virtio_transport_dec_rx_pkt(vvs, pkt_len);
+                       __skb_unlink(skb, &vvs->rx_queue);
                        consume_skb(skb);
-               } else {
-                       __skb_queue_head(&vvs->rx_queue, skb);
                }
        }
 
@@ -437,17 +435,17 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
 
        while (!msg_ready) {
                struct virtio_vsock_hdr *hdr;
+               size_t pkt_len;
 
                skb = __skb_dequeue(&vvs->rx_queue);
                if (!skb)
                        break;
                hdr = virtio_vsock_hdr(skb);
+               pkt_len = (size_t)le32_to_cpu(hdr->len);
 
                if (dequeued_len >= 0) {
-                       size_t pkt_len;
                        size_t bytes_to_copy;
 
-                       pkt_len = (size_t)le32_to_cpu(hdr->len);
                        bytes_to_copy = min(user_buf_len, pkt_len);
 
                        if (bytes_to_copy) {
@@ -466,7 +464,6 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
                                        dequeued_len = err;
                                } else {
                                        user_buf_len -= bytes_to_copy;
-                                       skb_pull(skb, bytes_to_copy);
                                }
 
                                spin_lock_bh(&vvs->rx_lock);
@@ -484,7 +481,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
                                msg->msg_flags |= MSG_EOR;
                }
 
-               virtio_transport_dec_rx_pkt(vvs, skb);
+               virtio_transport_dec_rx_pkt(vvs, pkt_len);
                kfree_skb(skb);
        }
 
@@ -1040,7 +1037,7 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
 
        spin_lock_bh(&vvs->rx_lock);
 
-       can_enqueue = virtio_transport_inc_rx_pkt(vvs, skb);
+       can_enqueue = virtio_transport_inc_rx_pkt(vvs, len);
        if (!can_enqueue) {
                free_pkt = true;
                goto out;
index 112b4bb009c80f648748256b322d9a5e7a7e8514..4f63059efd813ba6dd257a7c7ebebacb2f102cd1 100644 (file)
@@ -462,6 +462,11 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
        [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
 };
 
+static struct netlink_range_validation nl80211_punct_bitmap_range = {
+       .min = 0,
+       .max = 0xffff,
+};
+
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
        [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -805,7 +810,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
        [NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
        [NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
-       [NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_RANGE(NLA_U8, 0, 0xffff),
+       [NL80211_ATTR_PUNCT_BITMAP] =
+               NLA_POLICY_FULL_RANGE(NLA_U32, &nl80211_punct_bitmap_range),
 };
 
 /* policy for the key attributes */
@@ -8901,7 +8907,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
                struct cfg80211_chan_def *chandef;
 
                chandef = wdev_chandef(wdev, link_id);
-               if (!chandef)
+               if (!chandef || !chandef->chan)
                        continue;
 
                /*
@@ -10793,8 +10799,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
 
 static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
                                              const u8 *ssid, int ssid_len,
-                                             struct nlattr **attrs,
-                                             const u8 **bssid_out)
+                                             struct nlattr **attrs)
 {
        struct ieee80211_channel *chan;
        struct cfg80211_bss *bss;
@@ -10821,7 +10826,6 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device
        if (!bss)
                return ERR_PTR(-ENOENT);
 
-       *bssid_out = bssid;
        return bss;
 }
 
@@ -10831,7 +10835,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
        struct net_device *dev = info->user_ptr[1];
        struct cfg80211_assoc_request req = {};
        struct nlattr **attrs = NULL;
-       const u8 *bssid, *ssid;
+       const u8 *ap_addr, *ssid;
        unsigned int link_id;
        int err, ssid_len;
 
@@ -10968,6 +10972,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
 
                req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
+               ap_addr = req.ap_mld_addr;
 
                attrs = kzalloc(attrsize, GFP_KERNEL);
                if (!attrs)
@@ -10993,8 +10998,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
                                goto free;
                        }
                        req.links[link_id].bss =
-                               nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
-                                                 &bssid);
+                               nl80211_assoc_bss(rdev, ssid, ssid_len, attrs);
                        if (IS_ERR(req.links[link_id].bss)) {
                                err = PTR_ERR(req.links[link_id].bss);
                                req.links[link_id].bss = NULL;
@@ -11045,10 +11049,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
                if (req.link_id >= 0)
                        return -EINVAL;
 
-               req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
-                                           &bssid);
+               req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs);
                if (IS_ERR(req.bss))
                        return PTR_ERR(req.bss);
+               ap_addr = req.bss->bssid;
        }
 
        err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
@@ -11061,7 +11065,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
                        dev->ieee80211_ptr->conn_owner_nlportid =
                                info->snd_portid;
                        memcpy(dev->ieee80211_ptr->disconnect_bssid,
-                              bssid, ETH_ALEN);
+                              ap_addr, ETH_ALEN);
                }
 
                wdev_unlock(dev->ieee80211_ptr);
index 4681e8e8ad943605b61aa6be685e6c76a70b4c0f..02207e852d796d31a04e3a1a83f500c27b21203d 100644 (file)
@@ -150,10 +150,11 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
 
 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 {
-       u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
        bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
-       u64 npgs, addr = mr->addr, size = mr->len;
-       unsigned int chunks, chunks_rem;
+       u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+       u64 addr = mr->addr, size = mr->len;
+       u32 chunks_rem, npgs_rem;
+       u64 chunks, npgs;
        int err;
 
        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
@@ -188,8 +189,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        if (npgs > U32_MAX)
                return -EINVAL;
 
-       chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
-       if (chunks == 0)
+       chunks = div_u64_rem(size, chunk_size, &chunks_rem);
+       if (!chunks || chunks > U32_MAX)
                return -EINVAL;
 
        if (!unaligned_chunks && chunks_rem)
@@ -202,7 +203,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        umem->headroom = headroom;
        umem->chunk_size = chunk_size;
        umem->chunks = chunks;
-       umem->npgs = (u32)npgs;
+       umem->npgs = npgs;
        umem->pgs = NULL;
        umem->user = NULL;
        umem->flags = mr->flags;
index 2ab3e09e2227126f5e5eba7d4f1ea172de090c58..50baf50dc513ac36f540c172b9a8389d7c78d5f2 100644 (file)
@@ -2815,11 +2815,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
                        goto error;
                }
 
-               if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
-                       NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate an AF_UNSPEC selector");
-                       goto error;
-               }
-
                x->inner_mode = *inner_mode;
 
                if (x->props.family == AF_INET)
index cf5172d4ce68cfed78c88b782335ca4d8b66f09d..103af2b3e986f322882810c132096d65429e3d07 100644 (file)
@@ -1012,7 +1012,9 @@ static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
                return -EMSGSIZE;
 
        ap = nla_data(nla);
-       memcpy(ap, aead, sizeof(*aead));
+       strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name));
+       ap->alg_key_len = aead->alg_key_len;
+       ap->alg_icv_len = aead->alg_icv_len;
 
        if (redact_secret && aead->alg_key_len)
                memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
@@ -1032,7 +1034,8 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
                return -EMSGSIZE;
 
        ap = nla_data(nla);
-       memcpy(ap, ealg, sizeof(*ealg));
+       strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name));
+       ap->alg_key_len = ealg->alg_key_len;
 
        if (redact_secret && ealg->alg_key_len)
                memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
@@ -1043,6 +1046,40 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
        return 0;
 }
 
+static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
+{
+       struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
+       struct xfrm_algo *ap;
+
+       if (!nla)
+               return -EMSGSIZE;
+
+       ap = nla_data(nla);
+       strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name));
+       ap->alg_key_len = 0;
+
+       return 0;
+}
+
+static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
+{
+       struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
+       struct xfrm_encap_tmpl *uep;
+
+       if (!nla)
+               return -EMSGSIZE;
+
+       uep = nla_data(nla);
+       memset(uep, 0, sizeof(*uep));
+
+       uep->encap_type = ep->encap_type;
+       uep->encap_sport = ep->encap_sport;
+       uep->encap_dport = ep->encap_dport;
+       uep->encap_oa = ep->encap_oa;
+
+       return 0;
+}
+
 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
 {
        int ret = 0;
@@ -1098,12 +1135,12 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
                        goto out;
        }
        if (x->calg) {
-               ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+               ret = copy_to_user_calg(x->calg, skb);
                if (ret)
                        goto out;
        }
        if (x->encap) {
-               ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+               ret = copy_to_user_encap(x->encap, skb);
                if (ret)
                        goto out;
        }
index feb43045d1b1ed84bab049b98cd50af88d3643cc..6e9ce6720a05aacbaa2f7e0dc96c2e67819e7269 100644 (file)
@@ -3,7 +3,6 @@
 /generate_rust_target
 /insert-sys-cert
 /kallsyms
-/list-gitignored
 /module.lds
 /recordmcount
 /sign-file
index e8917975905ca65973bc2e1e005a53006c08e5a3..32b6ba7227284c0482c980d6f9a02a15d8ab4f7a 100644 (file)
@@ -38,7 +38,7 @@ HOSTCFLAGS_sorttable.o += -DMCOUNT_SORT_ENABLED
 endif
 
 # The following programs are only built on demand
-hostprogs += list-gitignored unifdef
+hostprogs += unifdef
 
 # The module linker script is preprocessed on demand
 targets += module.lds
index b941e6341b364f26cba7555a56a9390745656e71..61f72eb8d9be7bc5de132b4304c5ebffbfe96341 100644 (file)
@@ -2,6 +2,7 @@
 # Makefile for the different targets used to generate full packages of a kernel
 
 include $(srctree)/scripts/Kbuild.include
+include $(srctree)/scripts/Makefile.lib
 
 KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
 KBUILD_PKG_ROOTCMD ?="fakeroot -u"
@@ -26,54 +27,46 @@ fi ; \
 tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
        --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3)
 
-# .tmp_filelist .tmp_filelist_exclude
+# tarball compression
 # ---------------------------------------------------------------------------
 
-scripts/list-gitignored: FORCE
-       $(Q)$(MAKE) -f $(srctree)/Makefile scripts_package
+%.tar.gz: %.tar
+       $(call cmd,gzip)
 
-# 1f5d3a6b6532e25a5cdf1f311956b2b03d343a48 removed '*.rej' from .gitignore,
-# but it is definitely a generated file.
-filechk_filelist = \
-       $< --exclude='*.rej' --output=$@_exclude --prefix=./ --rootdir=$(srctree) --stat=-
+%.tar.bz2: %.tar
+       $(call cmd,bzip2)
 
-.tmp_filelist: scripts/list-gitignored FORCE
-       $(call filechk,filelist)
+%.tar.xz: %.tar
+       $(call cmd,xzmisc)
 
-# tarball
-# ---------------------------------------------------------------------------
-
-quiet_cmd_tar = TAR     $@
-      cmd_tar = tar -c -f $@ $(tar-compress-opt) $(tar-exclude-opt) \
-                --owner=0 --group=0 --sort=name \
-                --transform 's:^\.:$*:S' -C $(tar-rootdir) .
-
-tar-rootdir := $(srctree)
+%.tar.zst: %.tar
+       $(call cmd,zstd)
 
-%.tar:
-       $(call cmd,tar)
-
-%.tar.gz: private tar-compress-opt := -I $(KGZIP)
-%.tar.gz:
-       $(call cmd,tar)
+# Git
+# ---------------------------------------------------------------------------
 
-%.tar.bz2: private tar-compress-opt := -I $(KBZIP2)
-%.tar.bz2:
-       $(call cmd,tar)
+filechk_HEAD = git -C $(srctree) rev-parse --verify HEAD 2>/dev/null
 
-%.tar.xz: private tar-compress-opt := -I $(XZ)
-%.tar.xz:
-       $(call cmd,tar)
+.tmp_HEAD: check-git FORCE
+       $(call filechk,HEAD)
 
-%.tar.zst: private tar-compress-opt := -I $(ZSTD)
-%.tar.zst:
-       $(call cmd,tar)
+PHONY += check-git
+check-git:
+       @if ! $(srctree)/scripts/check-git; then \
+               echo >&2 "error: creating source package requires git repository"; \
+               false; \
+       fi
 
 # Linux source tarball
 # ---------------------------------------------------------------------------
 
-linux.tar.gz: tar-exclude-opt = --exclude=./$@ --exclude-from=$<_exclude
-linux.tar.gz: .tmp_filelist
+quiet_cmd_archive_linux = ARCHIVE $@
+      cmd_archive_linux = \
+       git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ $$(cat $<)
+
+targets += linux.tar
+linux.tar: .tmp_HEAD FORCE
+       $(call if_changed,archive_linux)
 
 # rpm-pkg
 # ---------------------------------------------------------------------------
@@ -89,7 +82,7 @@ PHONY += srcrpm-pkg
 srcrpm-pkg: linux.tar.gz
        $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
        +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE)-linux -bs kernel.spec \
-       --define='_smp_mflags %{nil}' --define='_sourcedir .' --define='_srcrpmdir .'
+       --define='_smp_mflags %{nil}' --define='_sourcedir rpmbuild/SOURCES' --define='_srcrpmdir .'
 
 # binrpm-pkg
 # ---------------------------------------------------------------------------
@@ -148,74 +141,62 @@ snap-pkg:
 # dir-pkg tar*-pkg - tarball targets
 # ---------------------------------------------------------------------------
 
-tar-pkg-tarball = linux-$(KERNELRELEASE)-$(ARCH).$(1)
-tar-pkg-phony = $(subst .,,$(1))-pkg
-
 tar-install: FORCE
        $(Q)$(MAKE) -f $(srctree)/Makefile
        +$(Q)$(srctree)/scripts/package/buildtar $@
 
+quiet_cmd_tar = TAR     $@
+      cmd_tar = cd $<; tar cf ../$@ --owner=root --group=root --sort=name *
+
+linux-$(KERNELRELEASE)-$(ARCH).tar: tar-install
+       $(call cmd,tar)
+
 PHONY += dir-pkg
 dir-pkg: tar-install
        @echo "Kernel tree successfully created in $<"
 
-define tar-pkg-rule
-PHONY += $(tar-pkg-phony)
-$(tar-pkg-phony): $(tar-pkg-tarball)
+PHONY += tar-pkg
+tar-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar
        @:
 
-$(tar-pkg-tarball): private tar-rootdir := tar-install
-$(tar-pkg-tarball): tar-install
-endef
-
-$(foreach x, tar tar.gz tar.bz2 tar.xz tar.zst, $(eval $(call tar-pkg-rule,$(x))))
+tar%-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar.% FORCE
+       @:
 
 # perf-tar*-src-pkg - generate a source tarball with perf source
 # ---------------------------------------------------------------------------
 
-perf-tar-src-pkg-tarball = perf-$(KERNELVERSION).$(1)
-perf-tar-src-pkg-phony   = perf-$(subst .,,$(1))-src-pkg
-
-quiet_cmd_stage_perf_src = STAGE   $@
-      cmd_stage_perf_src = \
-       rm -rf $@; \
-       mkdir -p $@; \
-       tar -c -f - --exclude-from=$<_exclude -C $(srctree) --files-from=$(srctree)/tools/perf/MANIFEST | \
-       tar -x -f - -C $@
-
-.tmp_perf: .tmp_filelist
-       $(call cmd,stage_perf_src)
-
-filechk_perf_head = \
-       if test -z "$(git -C $(srctree) rev-parse --show-cdup 2>/dev/null)" && \
-              head=$$(git -C $(srctree) rev-parse --verify HEAD 2>/dev/null); then \
-               echo $$head; \
-       else \
-               echo "not a git tree"; \
-       fi
+.tmp_perf:
+       $(Q)mkdir .tmp_perf
 
-.tmp_perf/HEAD: .tmp_perf FORCE
-       $(call filechk,perf_head)
+.tmp_perf/HEAD: .tmp_HEAD | .tmp_perf
+       $(call cmd,copy)
 
 quiet_cmd_perf_version_file = GEN     $@
       cmd_perf_version_file = cd $(srctree)/tools/perf; util/PERF-VERSION-GEN $(dir $(abspath $@))
 
-# PERF-VERSION-FILE and HEAD are independent, but this avoids updating the
+# PERF-VERSION-FILE and .tmp_HEAD are independent, but this avoids updating the
 # timestamp of PERF-VERSION-FILE.
 # The best is to fix tools/perf/util/PERF-VERSION-GEN.
-.tmp_perf/PERF-VERSION-FILE: .tmp_perf/HEAD $(srctree)/tools/perf/util/PERF-VERSION-GEN
+.tmp_perf/PERF-VERSION-FILE: .tmp_HEAD $(srctree)/tools/perf/util/PERF-VERSION-GEN | .tmp_perf
        $(call cmd,perf_version_file)
 
-define perf-tar-src-pkg-rule
-PHONY += $(perf-tar-src-pkg-phony)
-$(perf-tar-src-pkg-phony): $(perf-tar-src-pkg-tarball)
-       @:
+quiet_cmd_archive_perf = ARCHIVE $@
+      cmd_archive_perf = \
+       git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ \
+       --add-file=$$(realpath $(word 2, $^)) \
+       --add-file=$$(realpath $(word 3, $^)) \
+       $$(cat $(word 2, $^))^{tree} $$(cat $<)
 
-$(perf-tar-src-pkg-tarball): private tar-rootdir := .tmp_perf
-$(perf-tar-src-pkg-tarball): .tmp_filelist .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE
-endef
+targets += perf-$(KERNELVERSION).tar
+perf-$(KERNELVERSION).tar: tools/perf/MANIFEST .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE FORCE
+       $(call if_changed,archive_perf)
+
+PHONY += perf-tar-src-pkg
+perf-tar-src-pkg: perf-$(KERNELVERSION).tar
+       @:
 
-$(foreach x, tar tar.gz tar.bz2 tar.xz tar.zst, $(eval $(call perf-tar-src-pkg-rule,$(x))))
+perf-tar%-src-pkg: perf-$(KERNELVERSION).tar.% FORCE
+       @:
 
 # Help text displayed when executing 'make help'
 # ---------------------------------------------------------------------------
@@ -243,4 +224,13 @@ help:
 PHONY += FORCE
 FORCE:
 
+# Read all saved command lines and dependencies for the $(targets) we
+# may be building above, using $(if_changed{,_dep}). As an
+# optimization, we don't need to read them if the target does not
+# exist, we will rebuild anyway in that case.
+
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
 .PHONY: $(PHONY)
diff --git a/scripts/check-git b/scripts/check-git
new file mode 100755 (executable)
index 0000000..2ca6c5d
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# succeed if we are in a git repository
+
+srctree="$(dirname $0)/.."
+
+if ! git -C "${srctree}" rev-parse --verify HEAD >/dev/null 2>/dev/null; then
+       exit 1
+fi
+
+if ! test -z $(git -C "${srctree}" rev-parse --show-cdup 2>/dev/null); then
+       exit 1
+fi
index f33e61aca93d34ee4cc05b1266f7449ff71fc213..1e5d2eeb726df00e5be8b7aef040b97b011546c8 100755 (executable)
@@ -114,7 +114,6 @@ cat << EOF
 #define __IGNORE_truncate
 #define __IGNORE_stat
 #define __IGNORE_lstat
-#define __IGNORE_fstat
 #define __IGNORE_fcntl
 #define __IGNORE_fadvise64
 #define __IGNORE_newfstatat
@@ -255,6 +254,9 @@ cat << EOF
 /* 64-bit ports never needed these, and new 32-bit ports can use statx */
 #define __IGNORE_fstat64
 #define __IGNORE_fstatat64
+
+/* Newer ports are not required to provide fstat in favor of statx */
+#define __IGNORE_fstat
 EOF
 }
 
index 8a68179a98a3986ed12a25bcbb0e535c5c54a5f4..a239a87e7bec1c75ffba780d291fb7493c868780 100644 (file)
@@ -119,6 +119,7 @@ static bool is_ignored_symbol(const char *name, char type)
                "kallsyms_markers",
                "kallsyms_token_table",
                "kallsyms_token_index",
+               "kallsyms_seqs_of_names",
                /* Exclude linker generated symbols which vary between passes */
                "_SDA_BASE_",           /* ppc */
                "_SDA2_BASE_",          /* ppc */
index b7c9f1dd5e4229df71e99ae9585424155ac101f0..992575f1e97693af4f715bb66d6dfd7f3a93f58b 100644 (file)
@@ -1226,10 +1226,12 @@ static void (*conf_changed_callback)(void);
 
 void conf_set_changed(bool val)
 {
-       if (conf_changed_callback && conf_changed != val)
-               conf_changed_callback();
+       bool changed = conf_changed != val;
 
        conf_changed = val;
+
+       if (conf_changed_callback && changed)
+               conf_changed_callback();
 }
 
 bool conf_get_changed(void)
diff --git a/scripts/list-gitignored.c b/scripts/list-gitignored.c
deleted file mode 100644 (file)
index f9941f8..0000000
+++ /dev/null
@@ -1,1057 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-//
-// Traverse the source tree, parsing all .gitignore files, and print file paths
-// that are ignored by git.
-// The output is suitable to the --exclude-from option of tar.
-// This is useful until the --exclude-vcs-ignores option gets working correctly.
-//
-// Copyright (C) 2023 Masahiro Yamada <masahiroy@kernel.org>
-//                      (a lot of code imported from GIT)
-
-#include <assert.h>
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <getopt.h>
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-// Imported from commit 23c56f7bd5f1667f8b793d796bf30e39545920f6 in GIT
-//
-//---------------------------(IMPORT FROM GIT BEGIN)---------------------------
-
-// Copied from environment.c
-
-static bool ignore_case;
-
-// Copied from git-compat-util.h
-
-/* Sane ctype - no locale, and works with signed chars */
-#undef isascii
-#undef isspace
-#undef isdigit
-#undef isalpha
-#undef isalnum
-#undef isprint
-#undef islower
-#undef isupper
-#undef tolower
-#undef toupper
-#undef iscntrl
-#undef ispunct
-#undef isxdigit
-
-static const unsigned char sane_ctype[256];
-#define GIT_SPACE 0x01
-#define GIT_DIGIT 0x02
-#define GIT_ALPHA 0x04
-#define GIT_GLOB_SPECIAL 0x08
-#define GIT_REGEX_SPECIAL 0x10
-#define GIT_PATHSPEC_MAGIC 0x20
-#define GIT_CNTRL 0x40
-#define GIT_PUNCT 0x80
-#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0)
-#define isascii(x) (((x) & ~0x7f) == 0)
-#define isspace(x) sane_istest(x,GIT_SPACE)
-#define isdigit(x) sane_istest(x,GIT_DIGIT)
-#define isalpha(x) sane_istest(x,GIT_ALPHA)
-#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
-#define isprint(x) ((x) >= 0x20 && (x) <= 0x7e)
-#define islower(x) sane_iscase(x, 1)
-#define isupper(x) sane_iscase(x, 0)
-#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL)
-#define iscntrl(x) (sane_istest(x,GIT_CNTRL))
-#define ispunct(x) sane_istest(x, GIT_PUNCT | GIT_REGEX_SPECIAL | \
-               GIT_GLOB_SPECIAL | GIT_PATHSPEC_MAGIC)
-#define isxdigit(x) (hexval_table[(unsigned char)(x)] != -1)
-#define tolower(x) sane_case((unsigned char)(x), 0x20)
-#define toupper(x) sane_case((unsigned char)(x), 0)
-
-static inline int sane_case(int x, int high)
-{
-       if (sane_istest(x, GIT_ALPHA))
-               x = (x & ~0x20) | high;
-       return x;
-}
-
-static inline int sane_iscase(int x, int is_lower)
-{
-       if (!sane_istest(x, GIT_ALPHA))
-               return 0;
-
-       if (is_lower)
-               return (x & 0x20) != 0;
-       else
-               return (x & 0x20) == 0;
-}
-
-// Copied from ctype.c
-
-enum {
-       S = GIT_SPACE,
-       A = GIT_ALPHA,
-       D = GIT_DIGIT,
-       G = GIT_GLOB_SPECIAL,   /* *, ?, [, \\ */
-       R = GIT_REGEX_SPECIAL,  /* $, (, ), +, ., ^, {, | */
-       P = GIT_PATHSPEC_MAGIC, /* other non-alnum, except for ] and } */
-       X = GIT_CNTRL,
-       U = GIT_PUNCT,
-       Z = GIT_CNTRL | GIT_SPACE
-};
-
-static const unsigned char sane_ctype[256] = {
-       X, X, X, X, X, X, X, X, X, Z, Z, X, X, Z, X, X,         /*   0.. 15 */
-       X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X,         /*  16.. 31 */
-       S, P, P, P, R, P, P, P, R, R, G, R, P, P, R, P,         /*  32.. 47 */
-       D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G,         /*  48.. 63 */
-       P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A,         /*  64.. 79 */
-       A, A, A, A, A, A, A, A, A, A, A, G, G, U, R, P,         /*  80.. 95 */
-       P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A,         /*  96..111 */
-       A, A, A, A, A, A, A, A, A, A, A, R, R, U, P, X,         /* 112..127 */
-       /* Nothing in the 128.. range */
-};
-
-// Copied from hex.c
-
-static const signed char hexval_table[256] = {
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 00-07 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 08-0f */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 10-17 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 18-1f */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 20-27 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 28-2f */
-         0,  1,  2,  3,  4,  5,  6,  7,                /* 30-37 */
-         8,  9, -1, -1, -1, -1, -1, -1,                /* 38-3f */
-        -1, 10, 11, 12, 13, 14, 15, -1,                /* 40-47 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 48-4f */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 50-57 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 58-5f */
-        -1, 10, 11, 12, 13, 14, 15, -1,                /* 60-67 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 68-67 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 70-77 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 78-7f */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 80-87 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 88-8f */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 90-97 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* 98-9f */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* a0-a7 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* a8-af */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* b0-b7 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* b8-bf */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* c0-c7 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* c8-cf */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* d0-d7 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* d8-df */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* e0-e7 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* e8-ef */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* f0-f7 */
-        -1, -1, -1, -1, -1, -1, -1, -1,                /* f8-ff */
-};
-
-// Copied from wildmatch.h
-
-#define WM_CASEFOLD 1
-#define WM_PATHNAME 2
-
-#define WM_NOMATCH 1
-#define WM_MATCH 0
-#define WM_ABORT_ALL -1
-#define WM_ABORT_TO_STARSTAR -2
-
-// Copied from wildmatch.c
-
-typedef unsigned char uchar;
-
-// local modification: remove NEGATE_CLASS(2)
-
-#define CC_EQ(class, len, litmatch) ((len) == sizeof (litmatch)-1 \
-                                   && *(class) == *(litmatch) \
-                                   && strncmp((char*)class, litmatch, len) == 0)
-
-// local modification: simpilify macros
-#define ISBLANK(c) ((c) == ' ' || (c) == '\t')
-#define ISGRAPH(c) (isprint(c) && !isspace(c))
-#define ISPRINT(c) isprint(c)
-#define ISDIGIT(c) isdigit(c)
-#define ISALNUM(c) isalnum(c)
-#define ISALPHA(c) isalpha(c)
-#define ISCNTRL(c) iscntrl(c)
-#define ISLOWER(c) islower(c)
-#define ISPUNCT(c) ispunct(c)
-#define ISSPACE(c) isspace(c)
-#define ISUPPER(c) isupper(c)
-#define ISXDIGIT(c) isxdigit(c)
-
-/* Match pattern "p" against "text" */
-static int dowild(const uchar *p, const uchar *text, unsigned int flags)
-{
-       uchar p_ch;
-       const uchar *pattern = p;
-
-       for ( ; (p_ch = *p) != '\0'; text++, p++) {
-               int matched, match_slash, negated;
-               uchar t_ch, prev_ch;
-               if ((t_ch = *text) == '\0' && p_ch != '*')
-                       return WM_ABORT_ALL;
-               if ((flags & WM_CASEFOLD) && ISUPPER(t_ch))
-                       t_ch = tolower(t_ch);
-               if ((flags & WM_CASEFOLD) && ISUPPER(p_ch))
-                       p_ch = tolower(p_ch);
-               switch (p_ch) {
-               case '\\':
-                       /* Literal match with following character.  Note that the test
-                        * in "default" handles the p[1] == '\0' failure case. */
-                       p_ch = *++p;
-                       /* FALLTHROUGH */
-               default:
-                       if (t_ch != p_ch)
-                               return WM_NOMATCH;
-                       continue;
-               case '?':
-                       /* Match anything but '/'. */
-                       if ((flags & WM_PATHNAME) && t_ch == '/')
-                               return WM_NOMATCH;
-                       continue;
-               case '*':
-                       if (*++p == '*') {
-                               const uchar *prev_p = p - 2;
-                               while (*++p == '*') {}
-                               if (!(flags & WM_PATHNAME))
-                                       /* without WM_PATHNAME, '*' == '**' */
-                                       match_slash = 1;
-                               else if ((prev_p < pattern || *prev_p == '/') &&
-                                   (*p == '\0' || *p == '/' ||
-                                    (p[0] == '\\' && p[1] == '/'))) {
-                                       /*
-                                        * Assuming we already match 'foo/' and are at
-                                        * <star star slash>, just assume it matches
-                                        * nothing and go ahead match the rest of the
-                                        * pattern with the remaining string. This
-                                        * helps make foo/<*><*>/bar (<> because
-                                        * otherwise it breaks C comment syntax) match
-                                        * both foo/bar and foo/a/bar.
-                                        */
-                                       if (p[0] == '/' &&
-                                           dowild(p + 1, text, flags) == WM_MATCH)
-                                               return WM_MATCH;
-                                       match_slash = 1;
-                               } else /* WM_PATHNAME is set */
-                                       match_slash = 0;
-                       } else
-                               /* without WM_PATHNAME, '*' == '**' */
-                               match_slash = flags & WM_PATHNAME ? 0 : 1;
-                       if (*p == '\0') {
-                               /* Trailing "**" matches everything.  Trailing "*" matches
-                                * only if there are no more slash characters. */
-                               if (!match_slash) {
-                                       if (strchr((char *)text, '/'))
-                                               return WM_NOMATCH;
-                               }
-                               return WM_MATCH;
-                       } else if (!match_slash && *p == '/') {
-                               /*
-                                * _one_ asterisk followed by a slash
-                                * with WM_PATHNAME matches the next
-                                * directory
-                                */
-                               const char *slash = strchr((char*)text, '/');
-                               if (!slash)
-                                       return WM_NOMATCH;
-                               text = (const uchar*)slash;
-                               /* the slash is consumed by the top-level for loop */
-                               break;
-                       }
-                       while (1) {
-                               if (t_ch == '\0')
-                                       break;
-                               /*
-                                * Try to advance faster when an asterisk is
-                                * followed by a literal. We know in this case
-                                * that the string before the literal
-                                * must belong to "*".
-                                * If match_slash is false, do not look past
-                                * the first slash as it cannot belong to '*'.
-                                */
-                               if (!is_glob_special(*p)) {
-                                       p_ch = *p;
-                                       if ((flags & WM_CASEFOLD) && ISUPPER(p_ch))
-                                               p_ch = tolower(p_ch);
-                                       while ((t_ch = *text) != '\0' &&
-                                              (match_slash || t_ch != '/')) {
-                                               if ((flags & WM_CASEFOLD) && ISUPPER(t_ch))
-                                                       t_ch = tolower(t_ch);
-                                               if (t_ch == p_ch)
-                                                       break;
-                                               text++;
-                                       }
-                                       if (t_ch != p_ch)
-                                               return WM_NOMATCH;
-                               }
-                               if ((matched = dowild(p, text, flags)) != WM_NOMATCH) {
-                                       if (!match_slash || matched != WM_ABORT_TO_STARSTAR)
-                                               return matched;
-                               } else if (!match_slash && t_ch == '/')
-                                       return WM_ABORT_TO_STARSTAR;
-                               t_ch = *++text;
-                       }
-                       return WM_ABORT_ALL;
-               case '[':
-                       p_ch = *++p;
-                       if (p_ch == '^')
-                               p_ch = '!';
-                       /* Assign literal 1/0 because of "matched" comparison. */
-                       negated = p_ch == '!' ? 1 : 0;
-                       if (negated) {
-                               /* Inverted character class. */
-                               p_ch = *++p;
-                       }
-                       prev_ch = 0;
-                       matched = 0;
-                       do {
-                               if (!p_ch)
-                                       return WM_ABORT_ALL;
-                               if (p_ch == '\\') {
-                                       p_ch = *++p;
-                                       if (!p_ch)
-                                               return WM_ABORT_ALL;
-                                       if (t_ch == p_ch)
-                                               matched = 1;
-                               } else if (p_ch == '-' && prev_ch && p[1] && p[1] != ']') {
-                                       p_ch = *++p;
-                                       if (p_ch == '\\') {
-                                               p_ch = *++p;
-                                               if (!p_ch)
-                                                       return WM_ABORT_ALL;
-                                       }
-                                       if (t_ch <= p_ch && t_ch >= prev_ch)
-                                               matched = 1;
-                                       else if ((flags & WM_CASEFOLD) && ISLOWER(t_ch)) {
-                                               uchar t_ch_upper = toupper(t_ch);
-                                               if (t_ch_upper <= p_ch && t_ch_upper >= prev_ch)
-                                                       matched = 1;
-                                       }
-                                       p_ch = 0; /* This makes "prev_ch" get set to 0. */
-                               } else if (p_ch == '[' && p[1] == ':') {
-                                       const uchar *s;
-                                       int i;
-                                       for (s = p += 2; (p_ch = *p) && p_ch != ']'; p++) {} /*SHARED ITERATOR*/
-                                       if (!p_ch)
-                                               return WM_ABORT_ALL;
-                                       i = p - s - 1;
-                                       if (i < 0 || p[-1] != ':') {
-                                               /* Didn't find ":]", so treat like a normal set. */
-                                               p = s - 2;
-                                               p_ch = '[';
-                                               if (t_ch == p_ch)
-                                                       matched = 1;
-                                               continue;
-                                       }
-                                       if (CC_EQ(s,i, "alnum")) {
-                                               if (ISALNUM(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "alpha")) {
-                                               if (ISALPHA(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "blank")) {
-                                               if (ISBLANK(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "cntrl")) {
-                                               if (ISCNTRL(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "digit")) {
-                                               if (ISDIGIT(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "graph")) {
-                                               if (ISGRAPH(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "lower")) {
-                                               if (ISLOWER(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "print")) {
-                                               if (ISPRINT(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "punct")) {
-                                               if (ISPUNCT(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "space")) {
-                                               if (ISSPACE(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "upper")) {
-                                               if (ISUPPER(t_ch))
-                                                       matched = 1;
-                                               else if ((flags & WM_CASEFOLD) && ISLOWER(t_ch))
-                                                       matched = 1;
-                                       } else if (CC_EQ(s,i, "xdigit")) {
-                                               if (ISXDIGIT(t_ch))
-                                                       matched = 1;
-                                       } else /* malformed [:class:] string */
-                                               return WM_ABORT_ALL;
-                                       p_ch = 0; /* This makes "prev_ch" get set to 0. */
-                               } else if (t_ch == p_ch)
-                                       matched = 1;
-                       } while (prev_ch = p_ch, (p_ch = *++p) != ']');
-                       if (matched == negated ||
-                           ((flags & WM_PATHNAME) && t_ch == '/'))
-                               return WM_NOMATCH;
-                       continue;
-               }
-       }
-
-       return *text ? WM_NOMATCH : WM_MATCH;
-}
-
-/* Match the "pattern" against the "text" string. */
-static int wildmatch(const char *pattern, const char *text, unsigned int flags)
-{
-       // local modification: move WM_CASEFOLD here
-       if (ignore_case)
-               flags |= WM_CASEFOLD;
-
-       return dowild((const uchar*)pattern, (const uchar*)text, flags);
-}
-
// Copied from dir.h

#define PATTERN_FLAG_NODIR 1		/* no '/' in pattern: match basename only */
#define PATTERN_FLAG_ENDSWITH 4		/* pattern is "*literal": suffix match */
#define PATTERN_FLAG_MUSTBEDIR 8	/* trailing '/': matches directories only */
#define PATTERN_FLAG_NEGATIVE 16	/* leading '!': re-includes matches */
-
-// Copied from dir.c
-
-static int fspathncmp(const char *a, const char *b, size_t count)
-{
-       return ignore_case ? strncasecmp(a, b, count) : strncmp(a, b, count);
-}
-
/*
 * Length of the leading literal part of "match": the number of bytes
 * before the first glob metacharacter (or before the terminating NUL).
 */
static int simple_length(const char *match)
{
	const char *p = match;

	while (*p != '\0' && !is_glob_special((unsigned char)*p))
		p++;

	return (int)(p - match);
}
-
/* True if "string" contains no glob metacharacters at all. */
static int no_wildcard(const char *string)
{
	const char *after_literal = string + simple_length(string);

	return *after_literal == '\0';
}
-
/*
 * Parse one .gitignore pattern line.
 *
 * On entry, *pattern points at the raw line.  On return:
 *   *pattern       - advanced past a leading '!', if any
 *   *patternlen    - pattern length, excluding any trailing '/'
 *   *flags         - PATTERN_FLAG_* bits describing the pattern
 *   *nowildcardlen - length of the literal (wildcard-free) prefix
 */
static void parse_path_pattern(const char **pattern,
			       int *patternlen,
			       unsigned *flags,
			       int *nowildcardlen)
{
	const char *p = *pattern;
	size_t i, len;

	*flags = 0;
	/* Leading '!' negates the pattern (re-include). */
	if (*p == '!') {
		*flags |= PATTERN_FLAG_NEGATIVE;
		p++;
	}
	len = strlen(p);
	/* A trailing '/' restricts the pattern to directories. */
	if (len && p[len - 1] == '/') {
		len--;
		*flags |= PATTERN_FLAG_MUSTBEDIR;
	}
	/* No '/' inside the pattern: it matches against basenames only. */
	for (i = 0; i < len; i++) {
		if (p[i] == '/')
			break;
	}
	if (i == len)
		*flags |= PATTERN_FLAG_NODIR;
	*nowildcardlen = simple_length(p);
	/*
	 * we should have excluded the trailing slash from 'p' too,
	 * but that's one more allocation. Instead just make sure
	 * nowildcardlen does not exceed real patternlen
	 */
	if (*nowildcardlen > len)
		*nowildcardlen = len;
	/* "*literal" with no other wildcards allows cheap suffix matching. */
	if (*p == '*' && no_wildcard(p + 1))
		*flags |= PATTERN_FLAG_ENDSWITH;
	*pattern = p;
	*patternlen = len;
}
-
/*
 * Strip unescaped trailing spaces from "buf" in place.  A space preceded
 * by a backslash is literal and is kept, as are interior spaces.
 */
static void trim_trailing_spaces(char *buf)
{
	char *trailing_run = NULL;	/* start of the current run of spaces */
	char *p = buf;

	while (*p) {
		if (*p == '\\') {
			p++;
			/* Lone backslash at end of line: keep everything. */
			if (!*p)
				return;
			trailing_run = NULL;
		} else if (*p == ' ') {
			if (!trailing_run)
				trailing_run = p;
		} else {
			trailing_run = NULL;
		}
		p++;
	}

	if (trailing_run)
		*trailing_run = '\0';
}
-
-static int match_basename(const char *basename, int basenamelen,
-                         const char *pattern, int prefix, int patternlen,
-                         unsigned flags)
-{
-       if (prefix == patternlen) {
-               if (patternlen == basenamelen &&
-                   !fspathncmp(pattern, basename, basenamelen))
-                       return 1;
-       } else if (flags & PATTERN_FLAG_ENDSWITH) {
-               /* "*literal" matching against "fooliteral" */
-               if (patternlen - 1 <= basenamelen &&
-                   !fspathncmp(pattern + 1,
-                                  basename + basenamelen - (patternlen - 1),
-                                  patternlen - 1))
-                       return 1;
-       } else {
-               // local modification: call wildmatch() directly
-               if (!wildmatch(pattern, basename, flags))
-                       return 1;
-       }
-       return 0;
-}
-
/*
 * Match a full path against a pattern containing a '/', i.e. one anchored
 * to the directory ("base") that its .gitignore file lives in.
 * Returns 1 on match, 0 otherwise.
 */
static int match_pathname(const char *pathname, int pathlen,
			  const char *base, int baselen,
			  const char *pattern, int prefix, int patternlen)
{
	// local modification: remove local variables

	/*
	 * match with FNM_PATHNAME; the pattern has base implicitly
	 * in front of it.
	 */
	if (*pattern == '/') {
		pattern++;
		patternlen--;
		prefix--;
	}

	/*
	 * baselen does not count the trailing slash. base[] may or
	 * may not end with a trailing slash though.
	 */
	if (pathlen < baselen + 1 ||
	    (baselen && pathname[baselen] != '/') ||
	    fspathncmp(pathname, base, baselen))
		return 0;

	// local modification: simplified because always baselen > 0
	pathname += baselen + 1;
	pathlen -= baselen + 1;

	/* Compare the literal (wildcard-free) head of the pattern first. */
	if (prefix) {
		/*
		 * if the non-wildcard part is longer than the
		 * remaining pathname, surely it cannot match.
		 */
		if (prefix > pathlen)
			return 0;

		if (fspathncmp(pattern, pathname, prefix))
			return 0;
		pattern += prefix;
		patternlen -= prefix;
		pathname += prefix;
		pathlen -= prefix;

		/*
		 * If the whole pattern did not have a wildcard,
		 * then our prefix match is all we need; we
		 * do not need to call fnmatch at all.
		 */
		if (!patternlen && !pathlen)
			return 1;
	}

	// local modification: call wildmatch() directly
	return !wildmatch(pattern, pathname, WM_PATHNAME);
}
-
// Copied from git/utf8.c

/* UTF-8 byte order mark (EF BB BF); skipped at the start of .gitignore files. */
static const char utf8_bom[] = "\357\273\277";

//----------------------------(IMPORT FROM GIT END)----------------------------
-
/*
 * One parsed .gitignore pattern.  The flexible array member holds two
 * NUL-terminated strings back to back: the pattern itself, followed by
 * the directory of the .gitignore file it came from.
 */
struct pattern {
	unsigned int flags;	/* PATTERN_FLAG_* bits */
	int nowildcardlen;	/* length of the literal prefix of pattern */
	int patternlen;		/* length of the pattern string */
	int dirlen;		/* length of the gitignore-directory string */
	char pattern[];		/* "<pattern>\0<gitignore dir>\0" */
};
-
/* Growable stack of active patterns; the last matching pattern wins. */
static struct pattern **pattern_list;
static int nr_patterns, alloced_patterns;

// Remember the number of patterns at each directory level
static int *nr_patterns_at;
// Track the current/max directory level
static int depth, max_depth;
static bool debug_on;		/* --debug: trace matching on stderr */
static FILE *out_fp, *stat_fp;	/* --output and --stat destinations */
static char *prefix = "";	/* --prefix: prepended to every printed path */
static char *progname;		/* basename of argv[0], for error messages */
-
/* Print strerror(errno) prefixed with "s" to stderr, then die. */
static void __attribute__((noreturn)) perror_exit(const char *s)
{
	perror(s);

	exit(EXIT_FAILURE);
}
-
/* Print a printf-style error message prefixed with the program name, then die. */
static void __attribute__((noreturn)) error_exit(const char *fmt, ...)
{
	va_list args;

	fprintf(stderr, "%s: error: ", progname);

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);

	exit(EXIT_FAILURE);
}
-
-static void debug(const char *fmt, ...)
-{
-       va_list args;
-       int i;
-
-       if (!debug_on)
-               return;
-
-       fprintf(stderr, "[DEBUG] ");
-
-       for (i = 0; i < depth * 2; i++)
-               fputc(' ', stderr);
-
-       va_start(args, fmt);
-       vfprintf(stderr, fmt, args);
-       va_end(args);
-}
-
-static void *xrealloc(void *ptr, size_t size)
-{
-       ptr = realloc(ptr, size);
-       if (!ptr)
-               perror_exit(progname);
-
-       return ptr;
-}
-
/* malloc() that dies on failure (delegates to xrealloc with a NULL ptr). */
static void *xmalloc(size_t size)
{
	return xrealloc(NULL, size);
}
-
// similar to last_matching_pattern_from_list() in GIT
/*
 * Decide whether "path" is ignored under the currently active patterns.
 * dirlen is the length of the directory part of path; is_dir says whether
 * path itself is a directory.
 */
static bool is_ignored(const char *path, int pathlen, int dirlen, bool is_dir)
{
	int i;

	// Search in the reverse order because the last matching pattern wins.
	for (i = nr_patterns - 1; i >= 0; i--) {
		struct pattern *p = pattern_list[i];
		unsigned int flags = p->flags;
		/* The gitignore directory is stored right after the pattern. */
		const char *gitignore_dir = p->pattern + p->patternlen + 1;
		bool ignored;

		/* "foo/" patterns apply to directories only. */
		if ((flags & PATTERN_FLAG_MUSTBEDIR) && !is_dir)
			continue;

		if (flags & PATTERN_FLAG_NODIR) {
			/* Pattern without '/': match the basename alone. */
			if (!match_basename(path + dirlen + 1,
					    pathlen - dirlen - 1,
					    p->pattern,
					    p->nowildcardlen,
					    p->patternlen,
					    p->flags))
				continue;
		} else {
			/* Pattern with '/': match relative to its gitignore dir. */
			if (!match_pathname(path, pathlen,
					    gitignore_dir, p->dirlen,
					    p->pattern,
					    p->nowildcardlen,
					    p->patternlen))
				continue;
		}

		debug("%s: matches %s%s%s (%s/.gitignore)\n", path,
		      flags & PATTERN_FLAG_NEGATIVE ? "!" : "", p->pattern,
		      flags & PATTERN_FLAG_MUSTBEDIR ? "/" : "",
		      gitignore_dir);

		/* A '!' pattern re-includes the path instead of ignoring it. */
		ignored = (flags & PATTERN_FLAG_NEGATIVE) == 0;
		if (ignored)
			debug("Ignore: %s\n", path);

		return ignored;
	}

	debug("%s: no match\n", path);

	return false;
}
-
/*
 * Parse "string" as a .gitignore pattern belonging to directory "dir"
 * and push it onto the global pattern list.  Empty patterns are dropped.
 */
static void add_pattern(const char *string, const char *dir, int dirlen)
{
	struct pattern *p;
	int patternlen, nowildcardlen;
	unsigned int flags;

	parse_path_pattern(&string, &patternlen, &flags, &nowildcardlen);

	if (patternlen == 0)
		return;

	/* +2: one NUL after the pattern, one after the directory. */
	p = xmalloc(sizeof(*p) + patternlen + dirlen + 2);

	/* Store "<pattern>\0<dir>\0" back to back in the flexible array. */
	memcpy(p->pattern, string, patternlen);
	p->pattern[patternlen] = 0;
	memcpy(p->pattern + patternlen + 1, dir, dirlen);
	p->pattern[patternlen + 1 + dirlen] = 0;

	p->patternlen = patternlen;
	p->nowildcardlen = nowildcardlen;
	p->dirlen = dirlen;
	p->flags = flags;

	debug("Add pattern: %s%s%s\n",
	      flags & PATTERN_FLAG_NEGATIVE ? "!" : "", p->pattern,
	      flags & PATTERN_FLAG_MUSTBEDIR ? "/" : "");

	/* Grow the pattern list in chunks of 128 entries. */
	if (nr_patterns >= alloced_patterns) {
		alloced_patterns += 128;
		pattern_list = xrealloc(pattern_list,
					sizeof(*pattern_list) * alloced_patterns);
	}

	pattern_list[nr_patterns++] = p;
}
-
-// similar to add_patterns_from_buffer() in GIT
-static void add_patterns_from_gitignore(const char *dir, int dirlen)
-{
-       struct stat st;
-       char path[PATH_MAX], *buf, *entry;
-       size_t size;
-       int fd, pathlen, i;
-
-       pathlen = snprintf(path, sizeof(path), "%s/.gitignore", dir);
-       if (pathlen >= sizeof(path))
-               error_exit("%s: too long path was truncated\n", path);
-
-       fd = open(path, O_RDONLY | O_NOFOLLOW);
-       if (fd < 0) {
-               if (errno != ENOENT)
-                       return perror_exit(path);
-               return;
-       }
-
-       if (fstat(fd, &st) < 0)
-               perror_exit(path);
-
-       size = st.st_size;
-
-       buf = xmalloc(size + 1);
-       if (read(fd, buf, st.st_size) != st.st_size)
-               perror_exit(path);
-
-       buf[st.st_size] = '\n';
-       if (close(fd))
-               perror_exit(path);
-
-       debug("Parse %s\n", path);
-
-       entry = buf;
-
-       // skip utf8 bom
-       if (!strncmp(entry, utf8_bom, strlen(utf8_bom)))
-               entry += strlen(utf8_bom);
-
-       for (i = entry - buf; i < size; i++) {
-               if (buf[i] == '\n') {
-                       if (entry != buf + i && entry[0] != '#') {
-                               buf[i - (i && buf[i-1] == '\r')] = 0;
-                               trim_trailing_spaces(entry);
-                               add_pattern(entry, dir, dirlen);
-                       }
-                       entry = buf + i + 1;
-               }
-       }
-
-       free(buf);
-}
-
-// Save the current number of patterns and increment the depth
-static void increment_depth(void)
-{
-       if (depth >= max_depth) {
-               max_depth += 1;
-               nr_patterns_at = xrealloc(nr_patterns_at,
-                                         sizeof(*nr_patterns_at) * max_depth);
-       }
-
-       nr_patterns_at[depth] = nr_patterns;
-       depth++;
-}
-
-// Decrement the depth, and free up the patterns of this directory level.
-static void decrement_depth(void)
-{
-       depth--;
-       assert(depth >= 0);
-
-       while (nr_patterns > nr_patterns_at[depth])
-               free(pattern_list[--nr_patterns]);
-}
-
/* Write one ignored path to the main output stream. */
static void print_path(const char *path)
{
	// The path always starts with "./"
	assert(strlen(path) >= 2);

	// Replace the root directory with a preferred prefix.
	// This is useful for the tar command.
	fprintf(out_fp, "%s%s\n", prefix, path + 2);
}
-
-static void print_stat(const char *path, struct stat *st)
-{
-       if (!stat_fp)
-               return;
-
-       if (!S_ISREG(st->st_mode) && !S_ISLNK(st->st_mode))
-               return;
-
-       assert(strlen(path) >= 2);
-
-       fprintf(stat_fp, "%c %9ld %10ld %s\n",
-               S_ISLNK(st->st_mode) ? 'l' : '-',
-               st->st_size, st->st_mtim.tv_sec, path + 2);
-}
-
// Traverse the entire directory tree, parsing .gitignore files.
// Print file paths that are not tracked by git.
//
// Return true if all files under the directory are ignored, false otherwise.
static bool traverse_directory(const char *dir, int dirlen)
{
	bool all_ignored = true;
	DIR *dirp;

	debug("Enter[%d]: %s\n", depth, dir);
	/* Patterns added below are discarded again by decrement_depth(). */
	increment_depth();

	add_patterns_from_gitignore(dir, dirlen);

	dirp = opendir(dir);
	if (!dirp)
		perror_exit(dir);

	while (1) {
		struct dirent *d;
		struct stat st;
		char path[PATH_MAX];
		int pathlen;
		bool ignored;

		/* readdir() returns NULL both at end-of-dir and on error. */
		errno = 0;
		d = readdir(dirp);
		if (!d) {
			if (errno)
				perror_exit(dir);
			break;
		}

		if (!strcmp(d->d_name, "..") || !strcmp(d->d_name, "."))
			continue;

		pathlen = snprintf(path, sizeof(path), "%s/%s", dir, d->d_name);
		if (pathlen >= sizeof(path))
			error_exit("%s: too long path was truncated\n", path);

		/* lstat: a symlink is classified as a link, not its target. */
		if (lstat(path, &st) < 0)
			perror_exit(path);

		/* Anything but regular files, dirs and symlinks is skipped. */
		if ((!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode)) ||
		    is_ignored(path, pathlen, dirlen, S_ISDIR(st.st_mode))) {
			ignored = true;
		} else {
			if (S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
				// If all the files in a directory are ignored,
				// let's ignore that directory as well. This
				// will avoid empty directories in the tarball.
				ignored = traverse_directory(path, pathlen);
			else
				ignored = false;
		}

		if (ignored) {
			print_path(path);
		} else {
			print_stat(path, &st);
			all_ignored = false;
		}
	}

	if (closedir(dirp))
		perror_exit(dir);

	decrement_depth();
	debug("Leave[%d]: %s\n", depth, dir);

	return all_ignored;
}
-
/* Print the command-line help text to stderr. */
static void usage(void)
{
	fprintf(stderr,
		"usage: %s [options]\n"
		"\n"
		"Show files that are ignored by git\n"
		"\n"
		"options:\n"
		"  -d, --debug                  print debug messages to stderr\n"
		"  -e, --exclude PATTERN        add the given exclude pattern\n"
		"  -h, --help                   show this help message and exit\n"
		"  -i, --ignore-case            Ignore case differences between the patterns and the files\n"
		"  -o, --output FILE            output the ignored files to a file (default: '-', i.e. stdout)\n"
		"  -p, --prefix PREFIX          prefix added to each path (default: empty string)\n"
		"  -r, --rootdir DIR            root of the source tree (default: current working directory)\n"
		"  -s, --stat FILE              output the file stat of non-ignored files to a file\n",
		progname);
}
-
/* Open "pathname" for writing into *fp; "-" selects stdout.  Dies on failure. */
static void open_output(const char *pathname, FILE **fp)
{
	if (!strcmp(pathname, "-")) {
		*fp = stdout;
		return;
	}

	*fp = fopen(pathname, "w");
	if (!*fp)
		perror_exit(pathname);
}
-
/* Flush "fp", verify no write error occurred at any point, and close it. */
static void close_output(const char *pathname, FILE *fp)
{
	fflush(fp);

	/* ferror() also catches short writes from earlier fprintf calls. */
	if (ferror(fp))
		error_exit("not all data was written to the output\n");

	if (fclose(fp))
		perror_exit(pathname);
}
-
/*
 * Entry point: parse options, chdir to the root of the source tree, then
 * walk it, printing every git-ignored path to the output file.
 */
int main(int argc, char *argv[])
{
	const char *output = "-";
	const char *rootdir = ".";
	const char *stat = NULL;	/* --stat output path; shadows stat(2) */

	/* progname = basename of argv[0], for error messages */
	progname = strrchr(argv[0], '/');
	if (progname)
		progname++;
	else
		progname = argv[0];

	while (1) {
		static struct option long_options[] = {
			{"debug",       no_argument,       NULL, 'd'},
			{"help",        no_argument,       NULL, 'h'},
			{"ignore-case", no_argument,       NULL, 'i'},
			{"output",      required_argument, NULL, 'o'},
			{"prefix",      required_argument, NULL, 'p'},
			{"rootdir",     required_argument, NULL, 'r'},
			{"stat",        required_argument, NULL, 's'},
			{"exclude",     required_argument, NULL, 'x'},
			{},
		};

		int c = getopt_long(argc, argv, "dhino:p:r:s:x:", long_options, NULL);

		if (c == -1)
			break;

		switch (c) {
		case 'd':
			debug_on = true;
			break;
		case 'h':
			usage();
			exit(0);
		case 'i':
			ignore_case = true;
			break;
		case 'o':
			output = optarg;
			break;
		case 'p':
			prefix = optarg;
			break;
		case 'r':
			rootdir = optarg;
			break;
		case 's':
			stat = optarg;
			break;
		case 'x':
			/* --exclude patterns behave like entries of ./.gitignore */
			add_pattern(optarg, ".", strlen("."));
			break;
		case '?':
			usage();
			/* fallthrough */
		default:
			exit(EXIT_FAILURE);
		}
	}

	open_output(output, &out_fp);
	if (stat && stat[0])
		open_output(stat, &stat_fp);

	/* All traversal happens relative to the root directory. */
	if (chdir(rootdir))
		perror_exit(rootdir);

	/* The .git directory itself is never part of the output. */
	add_pattern(".git/", ".", strlen("."));

	/* If everything is ignored, report the root itself as ignored. */
	if (traverse_directory(".", strlen(".")))
		print_path("./");

	assert(depth == 0);

	while (nr_patterns > 0)
		free(pattern_list[--nr_patterns]);
	free(pattern_list);
	free(nr_patterns_at);

	close_output(output, out_fp);
	if (stat_fp)
		close_output(stat, stat_fp);

	return 0;
}
index ff5e7d8e380bba654657bf43dc544a571fd90e28..c5ae57167d7ceb8c8a3633b77829057f45e278ff 100755 (executable)
@@ -51,7 +51,116 @@ create_package() {
        dpkg-deb $dpkg_deb_opts ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS} --build "$pdir" ..
 }
 
-deploy_kernel_headers () {
+install_linux_image () {
+       pdir=$1
+       pname=$2
+
+       rm -rf ${pdir}
+
+       # Only some architectures with OF support have this target
+       if is_enabled CONFIG_OF_EARLY_FLATTREE && [ -d "${srctree}/arch/${SRCARCH}/boot/dts" ]; then
+               ${MAKE} -f ${srctree}/Makefile INSTALL_DTBS_PATH="${pdir}/usr/lib/linux-image-${KERNELRELEASE}" dtbs_install
+       fi
+
+       if is_enabled CONFIG_MODULES; then
+               ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
+               rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
+               rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
+               if [ "${SRCARCH}" = um ] ; then
+                       mkdir -p "${pdir}/usr/lib/uml/modules"
+                       mv "${pdir}/lib/modules/${KERNELRELEASE}" "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}"
+               fi
+       fi
+
+       # Install the kernel
+       if [ "${ARCH}" = um ] ; then
+               mkdir -p "${pdir}/usr/bin" "${pdir}/usr/share/doc/${pname}"
+               cp System.map "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}/System.map"
+               cp ${KCONFIG_CONFIG} "${pdir}/usr/share/doc/${pname}/config"
+               gzip "${pdir}/usr/share/doc/${pname}/config"
+       else
+               mkdir -p "${pdir}/boot"
+               cp System.map "${pdir}/boot/System.map-${KERNELRELEASE}"
+               cp ${KCONFIG_CONFIG} "${pdir}/boot/config-${KERNELRELEASE}"
+       fi
+
+       # Not all arches have the same installed path in debian
+       # XXX: have each arch Makefile export a variable of the canonical image install
+       # path instead
+       case "${SRCARCH}" in
+       um)
+               installed_image_path="usr/bin/linux-${KERNELRELEASE}";;
+       parisc|mips|powerpc)
+               installed_image_path="boot/vmlinux-${KERNELRELEASE}";;
+       *)
+               installed_image_path="boot/vmlinuz-${KERNELRELEASE}";;
+       esac
+       cp "$(${MAKE} -s -f ${srctree}/Makefile image_name)" "${pdir}/${installed_image_path}"
+
+       # Install the maintainer scripts
+       # Note: hook scripts under /etc/kernel are also executed by official Debian
+       # kernel packages, as well as kernel packages built using make-kpkg.
+       # make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
+       # so do we; recent versions of dracut and initramfs-tools will obey this.
+       debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
+       for script in postinst postrm preinst prerm; do
+               mkdir -p "${pdir}${debhookdir}/${script}.d"
+
+               mkdir -p "${pdir}/DEBIAN"
+               cat <<-EOF > "${pdir}/DEBIAN/${script}"
+
+               #!/bin/sh
+
+               set -e
+
+               # Pass maintainer script parameters to hook scripts
+               export DEB_MAINT_PARAMS="\$*"
+
+               # Tell initramfs builder whether it's wanted
+               export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No)
+
+               test -d ${debhookdir}/${script}.d && run-parts --arg="${KERNELRELEASE}" --arg="/${installed_image_path}" ${debhookdir}/${script}.d
+               exit 0
+               EOF
+               chmod 755 "${pdir}/DEBIAN/${script}"
+       done
+}
+
+install_linux_image_dbg () {
+       pdir=$1
+       image_pdir=$2
+
+       rm -rf ${pdir}
+
+       for module in $(find ${image_pdir}/lib/modules/ -name *.ko -printf '%P\n'); do
+               module=lib/modules/${module}
+               mkdir -p $(dirname ${pdir}/usr/lib/debug/${module})
+               # only keep debug symbols in the debug file
+               ${OBJCOPY} --only-keep-debug ${image_pdir}/${module} ${pdir}/usr/lib/debug/${module}
+               # strip original module from debug symbols
+               ${OBJCOPY} --strip-debug ${image_pdir}/${module}
+               # then add a link to those
+               ${OBJCOPY} --add-gnu-debuglink=${pdir}/usr/lib/debug/${module} ${image_pdir}/${module}
+       done
+
+       # re-sign stripped modules
+       if is_enabled CONFIG_MODULE_SIG_ALL; then
+               ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${image_pdir}" modules_sign
+       fi
+
+       # Build debug package
+       # Different tools want the image in different locations
+       # perf
+       mkdir -p ${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/
+       cp vmlinux ${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/
+       # systemtap
+       mkdir -p ${pdir}/usr/lib/debug/boot/
+       ln -s ../lib/modules/${KERNELRELEASE}/vmlinux ${pdir}/usr/lib/debug/boot/vmlinux-${KERNELRELEASE}
+       # kdump-tools
+       ln -s lib/modules/${KERNELRELEASE}/vmlinux ${pdir}/usr/lib/debug/vmlinux-${KERNELRELEASE}
+}
+
+install_kernel_headers () {
        pdir=$1
 
        rm -rf $pdir
@@ -89,7 +198,7 @@ deploy_kernel_headers () {
        ln -s /usr/src/linux-headers-$version $pdir/lib/modules/$version/build
 }
 
-deploy_libc_headers () {
+install_libc_headers () {
        pdir=$1
 
        rm -rf $pdir
@@ -104,132 +213,38 @@ deploy_libc_headers () {
        mv $pdir/usr/include/asm $pdir/usr/include/$host_arch/
 }
 
-version=$KERNELRELEASE
-tmpdir=debian/linux-image
-dbg_dir=debian/linux-image-dbg
-packagename=linux-image-$version
-dbg_packagename=$packagename-dbg
-
-if [ "$ARCH" = "um" ] ; then
-       packagename=user-mode-linux-$version
-fi
-
-# Not all arches have the same installed path in debian
-# XXX: have each arch Makefile export a variable of the canonical image install
-# path instead
-case $ARCH in
-um)
-       installed_image_path="usr/bin/linux-$version"
-       ;;
-parisc|mips|powerpc)
-       installed_image_path="boot/vmlinux-$version"
-       ;;
-*)
-       installed_image_path="boot/vmlinuz-$version"
-esac
-
-BUILD_DEBUG=$(if_enabled_echo CONFIG_DEBUG_INFO Yes)
-
-# Setup the directory structure
-rm -rf "$tmpdir" "$dbg_dir" debian/files
-mkdir -m 755 -p "$tmpdir/DEBIAN"
-mkdir -p "$tmpdir/lib" "$tmpdir/boot"
-
-# Install the kernel
-if [ "$ARCH" = "um" ] ; then
-       mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin" "$tmpdir/usr/share/doc/$packagename"
-       cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map"
-       cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config"
-       gzip "$tmpdir/usr/share/doc/$packagename/config"
-else
-       cp System.map "$tmpdir/boot/System.map-$version"
-       cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
-fi
-cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
-
-if is_enabled CONFIG_OF_EARLY_FLATTREE; then
-       # Only some architectures with OF support have this target
-       if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
-               $MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
-       fi
-fi
-
-if is_enabled CONFIG_MODULES; then
-       INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_install
-       rm -f "$tmpdir/lib/modules/$version/build"
-       rm -f "$tmpdir/lib/modules/$version/source"
-       if [ "$ARCH" = "um" ] ; then
-               mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/"
-               rmdir "$tmpdir/lib/modules/$version"
-       fi
-       if [ -n "$BUILD_DEBUG" ] ; then
-               for module in $(find $tmpdir/lib/modules/ -name *.ko -printf '%P\n'); do
-                       module=lib/modules/$module
-                       mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module)
-                       # only keep debug symbols in the debug file
-                       $OBJCOPY --only-keep-debug $tmpdir/$module $dbg_dir/usr/lib/debug/$module
-                       # strip original module from debug symbols
-                       $OBJCOPY --strip-debug $tmpdir/$module
-                       # then add a link to those
-                       $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $tmpdir/$module
-               done
-
-               # resign stripped modules
-               if is_enabled CONFIG_MODULE_SIG_ALL; then
-                       INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_sign
-               fi
-       fi
-fi
-
-# Install the maintainer scripts
-# Note: hook scripts under /etc/kernel are also executed by official Debian
-# kernel packages, as well as kernel packages built using make-kpkg.
-# make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and
-# so do we; recent versions of dracut and initramfs-tools will obey this.
-debhookdir=${KDEB_HOOKDIR:-/etc/kernel}
-for script in postinst postrm preinst prerm ; do
-       mkdir -p "$tmpdir$debhookdir/$script.d"
-       cat <<EOF > "$tmpdir/DEBIAN/$script"
-#!/bin/sh
-
-set -e
-
-# Pass maintainer script parameters to hook scripts
-export DEB_MAINT_PARAMS="\$*"
-
-# Tell initramfs builder whether it's wanted
-export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No)
-
-test -d $debhookdir/$script.d && run-parts --arg="$version" --arg="/$installed_image_path" $debhookdir/$script.d
-exit 0
-EOF
-       chmod 755 "$tmpdir/DEBIAN/$script"
+rm -f debian/files
+
+packages_enabled=$(dh_listpackages)
+
+for package in ${packages_enabled}
+do
+       case ${package} in
+       *-dbg)
+               # This must be done after linux-image, that is, we expect the
+               # debug package appears after linux-image in debian/control.
+               install_linux_image_dbg debian/linux-image-dbg debian/linux-image;;
+       linux-image-*|user-mode-linux-*)
+               install_linux_image debian/linux-image ${package};;
+       linux-libc-dev)
+               install_libc_headers debian/linux-libc-dev;;
+       linux-headers-*)
+               install_kernel_headers debian/linux-headers;;
+       esac
 done
 
-if [ "$ARCH" != "um" ]; then
-       if is_enabled CONFIG_MODULES; then
-               deploy_kernel_headers debian/linux-headers
-               create_package linux-headers-$version debian/linux-headers
-       fi
-
-       deploy_libc_headers debian/linux-libc-dev
-       create_package linux-libc-dev debian/linux-libc-dev
-fi
-
-create_package "$packagename" "$tmpdir"
-
-if [ -n "$BUILD_DEBUG" ] ; then
-       # Build debug package
-       # Different tools want the image in different locations
-       # perf
-       mkdir -p $dbg_dir/usr/lib/debug/lib/modules/$version/
-       cp vmlinux $dbg_dir/usr/lib/debug/lib/modules/$version/
-       # systemtap
-       mkdir -p $dbg_dir/usr/lib/debug/boot/
-       ln -s ../lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/boot/vmlinux-$version
-       # kdump-tools
-       ln -s lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/vmlinux-$version
-       create_package "$dbg_packagename" "$dbg_dir"
-fi
+for package in ${packages_enabled}
+do
+       case ${package} in
+       *-dbg)
+               create_package ${package} debian/linux-image-dbg;;
+       linux-image-*|user-mode-linux-*)
+               create_package ${package} debian/linux-image;;
+       linux-libc-dev)
+               create_package ${package} debian/linux-libc-dev;;
+       linux-headers-*)
+               create_package ${package} debian/linux-headers;;
+       esac
+done
 
 exit 0
index b079b0d121d47359d447a83d87822072d619da28..7950eff01781a306681985ce5a0a46743c411af5 100755 (executable)
@@ -1,16 +1,14 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0-only
 
-# Set up CROSS_COMPILE if we are cross-compiling, but not called from the
-# kernel toplevel Makefile
-if [ -z "${CROSS_COMPILE}${cross_compiling}" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then
+# Set up CROSS_COMPILE if not defined yet
+if [ "${CROSS_COMPILE+set}" != "set" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then
        echo CROSS_COMPILE=${DEB_HOST_GNU_TYPE}-
 fi
 
 version=$(dpkg-parsechangelog -S Version)
-version_upstream="${version%-*}"
-debian_revision="${version#${version_upstream}}"
-debian_revision="${debian_revision#*-}"
+debian_revision="${version##*-}"
 
-echo KERNELRELEASE=${version_upstream}
-echo KBUILD_BUILD_VERSION=${debian_revision}
+if [ "${version}" != "${debian_revision}" ]; then
+       echo KBUILD_BUILD_VERSION=${debian_revision}
+fi
diff --git a/scripts/package/gen-diff-patch b/scripts/package/gen-diff-patch
new file mode 100755 (executable)
index 0000000..f842ab5
--- /dev/null
@@ -0,0 +1,44 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+diff_patch="${1}"
+untracked_patch="${2}"
+srctree=$(dirname $0)/../..
+
+rm -f ${diff_patch} ${untracked_patch}
+
+if ! ${srctree}/scripts/check-git; then
+       exit
+fi
+
+mkdir -p "$(dirname ${diff_patch})" "$(dirname ${untracked_patch})"
+
+git -C "${srctree}" diff HEAD > "${diff_patch}"
+
+if [ ! -s "${diff_patch}" ]; then
+       rm -f "${diff_patch}"
+       exit
+fi
+
+git -C ${srctree} status --porcelain --untracked-files=all |
+while read stat path
+do
+       if [ "${stat}" = '??' ]; then
+
+               if ! diff -u /dev/null "${srctree}/${path}" > .tmp_diff &&
+                       ! head -n1 .tmp_diff | grep -q "Binary files"; then
+                       {
+                               echo "--- /dev/null"
+                               echo "+++ linux/$path"
+                               cat .tmp_diff | tail -n +3
+                       } >> ${untracked_patch}
+               fi
+       fi
+done
+
+rm -f .tmp_diff
+
+if [ ! -s "${diff_patch}" ]; then
+       rm -f "${diff_patch}"
+       exit
+fi
index f74380036bb54e0b107e49da9bdaca8c0b89cb04..e20a2b5be9eb29c4af62227750e170b9dabb36f6 100755 (executable)
@@ -91,7 +91,7 @@ version=$KERNELRELEASE
 if [ -n "$KDEB_PKGVERSION" ]; then
        packageversion=$KDEB_PKGVERSION
 else
-       packageversion=$version-$($srctree/init/build-version)
+       packageversion=$(${srctree}/scripts/setlocalversion --no-local ${srctree})-$($srctree/init/build-version)
 fi
 sourcename=${KDEB_SOURCENAME:-linux-upstream}
 
@@ -152,6 +152,14 @@ mkdir -p debian/patches
 } > debian/patches/config
 echo config > debian/patches/series
 
+$(dirname $0)/gen-diff-patch debian/patches/diff.patch debian/patches/untracked.patch
+if [ -f debian/patches/diff.patch ]; then
+       echo diff.patch >> debian/patches/series
+fi
+if [ -f debian/patches/untracked.patch ]; then
+       echo untracked.patch >> debian/patches/series
+fi
+
 echo $debarch > debian/arch
 extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
 extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
@@ -192,7 +200,7 @@ Section: kernel
 Priority: optional
 Maintainer: $maintainer
 Rules-Requires-Root: no
-Build-Depends: bc, rsync, kmod, cpio, bison, flex $extra_build_depends
+Build-Depends: bc, debhelper, rsync, kmod, cpio, bison, flex $extra_build_depends
 Homepage: https://www.kernel.org/
 
 Package: $packagename-$version
@@ -200,6 +208,10 @@ Architecture: $debarch
 Description: Linux kernel, version $version
  This package contains the Linux kernel, modules and corresponding other
  files, version: $version.
+EOF
+
+if [ "${SRCARCH}" != um ]; then
+cat <<EOF >> debian/control
 
 Package: linux-libc-dev
 Section: devel
@@ -222,6 +234,7 @@ Description: Linux kernel headers for $version on $debarch
  This is useful for people who need to build external modules
 EOF
 fi
+fi
 
 if is_enabled CONFIG_DEBUG_INFO; then
 cat <<EOF >> debian/control
@@ -239,10 +252,12 @@ cat <<EOF > debian/rules
 #!$(command -v $MAKE) -f
 
 srctree ?= .
+KERNELRELEASE = ${KERNELRELEASE}
 
 build-indep:
 build-arch:
        \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \
+       KERNELRELEASE=\$(KERNELRELEASE) \
        \$(shell \$(srctree)/scripts/package/deb-build-option) \
        olddefconfig all
 
@@ -250,7 +265,9 @@ build: build-arch
 
 binary-indep:
 binary-arch: build-arch
-       \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} intdeb-pkg
+       \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \
+       KERNELRELEASE=\$(KERNELRELEASE) intdeb-pkg
+
 clean:
        rm -rf debian/files debian/linux-*
        \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} clean
index 3c550960dd39554a0cf60f0e6e3fd123a0eafd46..b7d1dc28a5d6de457e988021785a18e0a7310762 100755 (executable)
@@ -15,15 +15,21 @@ if [ "$1" = prebuilt ]; then
        MAKE="$MAKE -f $srctree/Makefile"
 else
        S=
+
+       mkdir -p rpmbuild/SOURCES
+       cp linux.tar.gz rpmbuild/SOURCES
+       cp "${KCONFIG_CONFIG}" rpmbuild/SOURCES/config
+       $(dirname $0)/gen-diff-patch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch
+       touch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch
 fi
 
-if grep -q CONFIG_MODULES=y .config; then
+if grep -q CONFIG_MODULES=y include/config/auto.conf; then
        M=
 else
        M=DEL
 fi
 
-if grep -q CONFIG_DRM=y .config; then
+if grep -q CONFIG_DRM=y include/config/auto.conf; then
        PROVIDES=kernel-drm
 fi
 
@@ -48,7 +54,9 @@ sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
        Vendor: The Linux Community
        URL: https://www.kernel.org
 $S     Source0: linux.tar.gz
-$S     Source1: .config
+$S     Source1: config
+$S     Source2: diff.patch
+$S     Source3: untracked.patch
        Provides: $PROVIDES
 $S     BuildRequires: bc binutils bison dwarves
 $S     BuildRequires: (elfutils-libelf-devel or libelf-devel) flex
@@ -85,7 +93,13 @@ $S$M against the $__KERNELRELEASE kernel package.
 $S$M
 $S     %prep
 $S     %setup -q -n linux
-$S     cp %{SOURCE1} .
+$S     cp %{SOURCE1} .config
+$S     if [ -s %{SOURCE2} ]; then
+$S             patch -p1 < %{SOURCE2}
+$S     fi
+$S     if [ -s %{SOURCE3} ]; then
+$S             patch -p1 < %{SOURCE3}
+$S     fi
 $S
 $S     %build
 $S     $MAKE %{?_smp_mflags} KERNELRELEASE=$KERNELRELEASE KBUILD_BUILD_VERSION=%{release}
index e54839a42d4b49daf5cc3102e93adab36b59ff50..3d3babac82982b4bfec180b402a7f9e557ceafb4 100755 (executable)
 #
 
 usage() {
-       echo "Usage: $0 [srctree]" >&2
+       echo "Usage: $0 [--no-local] [srctree]" >&2
        exit 1
 }
 
+no_local=false
+if test "$1" = "--no-local"; then
+       no_local=true
+       shift
+fi
+
 srctree=.
 if test $# -gt 0; then
        srctree=$1
@@ -26,14 +32,22 @@ fi
 
 scm_version()
 {
-       local short
+       local short=false
+       local no_dirty=false
        local tag
-       short=false
+
+       while [ $# -gt 0 ];
+       do
+               case "$1" in
+               --short)
+                       short=true;;
+               --no-dirty)
+                       no_dirty=true;;
+               esac
+               shift
+       done
 
        cd "$srctree"
-       if test "$1" = "--short"; then
-               short=true
-       fi
 
        if test -n "$(git rev-parse --show-cdup 2>/dev/null)"; then
                return
@@ -75,6 +89,10 @@ scm_version()
                printf '%s%s' -g "$(echo $head | cut -c1-12)"
        fi
 
+       if ${no_dirty}; then
+               return
+       fi
+
        # Check for uncommitted changes.
        # This script must avoid any write attempt to the source tree, which
        # might be read-only.
@@ -110,11 +128,6 @@ collect_files()
        echo "$res"
 }
 
-if ! test -e include/config/auto.conf; then
-       echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
-       exit 1
-fi
-
 if [ -z "${KERNELVERSION}" ]; then
        echo "KERNELVERSION is not set" >&2
        exit 1
@@ -126,6 +139,16 @@ if test ! "$srctree" -ef .; then
        file_localversion="${file_localversion}$(collect_files "$srctree"/localversion*)"
 fi
 
+if ${no_local}; then
+       echo "${KERNELVERSION}$(scm_version --no-dirty)"
+       exit 0
+fi
+
+if ! test -e include/config/auto.conf; then
+       echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
+       exit 1
+fi
+
 # version string from CONFIG_LOCALVERSION
 config_localversion=$(sed -n 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/p' include/config/auto.conf)
 
index 2da4404276f0f5a68b0619dd6aa1d06a0fdb7198..07a0ef2baacd856324df852b38bfd087fb4dabbd 100644 (file)
@@ -38,9 +38,12 @@ static void cache_requested_key(struct key *key)
 #ifdef CONFIG_KEYS_REQUEST_CACHE
        struct task_struct *t = current;
 
-       key_put(t->cached_requested_key);
-       t->cached_requested_key = key_get(key);
-       set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+       /* Do not cache key if it is a kernel thread */
+       if (!(t->flags & PF_KTHREAD)) {
+               key_put(t->cached_requested_key);
+               t->cached_requested_key = key_get(key);
+               set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+       }
 #endif
 }
 
index ae31bb1275940453efb026410e5673d1e53a9d1e..317bdf6dcbef42f0954a7b8e3b757b5c8dcf8185 100644 (file)
@@ -472,6 +472,15 @@ static const struct config_entry config_table[] = {
        },
 #endif
 
+/* Meteor Lake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_METEORLAKE)
+       /* Meteorlake-P */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x7e28,
+       },
+#endif
+
 };
 
 static const struct config_entry *snd_intel_dsp_find_config
index 27e11b5f70b97dc982867df42074fc35142517d3..c7d7eff86727f83672560c87fbf5e62114f67f91 100644 (file)
@@ -430,7 +430,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
                pao = hpi_find_adapter(phm->adapter_index);
        } else {
                /* subsys messages don't address an adapter */
-               _HPI_6205(NULL, phm, phr);
+               phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
                return;
        }
 
index 81c4a45254ff2e33ee71f2a2a328b144954ff911..77a592f219472d9a472fa33d5e18a4fc6818350d 100644 (file)
@@ -328,14 +328,15 @@ enum {
 #define needs_eld_notify_link(chip)    false
 #endif
 
-#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+#define CONTROLLER_IN_GPU(pci) (((pci)->vendor == 0x8086) &&         \
+                                      (((pci)->device == 0x0a0c) || \
                                        ((pci)->device == 0x0c0c) || \
                                        ((pci)->device == 0x0d0c) || \
                                        ((pci)->device == 0x160c) || \
                                        ((pci)->device == 0x490d) || \
                                        ((pci)->device == 0x4f90) || \
                                        ((pci)->device == 0x4f91) || \
-                                       ((pci)->device == 0x4f92))
+                                       ((pci)->device == 0x4f92)))
 
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 
index acde4cd58785e0cbaa8196bbacead97826e822ac..099722ebaed83901df9c23904baa13e2bd9734b3 100644 (file)
@@ -4228,8 +4228,10 @@ static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid,
 
        for (i = 0; i < TUNING_CTLS_COUNT; i++)
                if (nid == ca0132_tuning_ctls[i].nid)
-                       break;
+                       goto found;
 
+       return -EINVAL;
+found:
        snd_hda_power_up(codec);
        dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20,
                        ca0132_tuning_ctls[i].req,
index 3c629f4ae08076c966f45e952a4b4ede60368570..f09a1d7c1b186e2219542e95bc4bef7df7a42248 100644 (file)
@@ -9447,6 +9447,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -9539,6 +9540,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
        SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
+       SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
index 4a4f09f924bc510ff54fff1cf02a008bca82d982..e3d398b8f54e4f0cd343e2bc56d5cf9295073eae 100644 (file)
@@ -968,6 +968,8 @@ int da7219_aad_init(struct snd_soc_component *component)
        INIT_WORK(&da7219_aad->hptest_work, da7219_aad_hptest_work);
        INIT_WORK(&da7219_aad->jack_det_work, da7219_aad_jack_det_work);
 
+       mutex_init(&da7219_aad->jack_det_mutex);
+
        ret = request_threaded_irq(da7219_aad->irq, da7219_aad_pre_irq_thread,
                                   da7219_aad_irq_thread,
                                   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
index 01e8ffda2a4bf37655fc38b1b07fee22b14be197..6d980fbc4207780ccc966df8378f70cf554f4cd4 100644 (file)
@@ -428,8 +428,13 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
 {
        struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
        bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+       bool has_capture = !hcp->hcd.no_i2s_capture;
+       bool has_playback = !hcp->hcd.no_i2s_playback;
        int ret = 0;
 
+       if (!((has_playback && tx) || (has_capture && !tx)))
+               return 0;
+
        mutex_lock(&hcp->lock);
        if (hcp->busy) {
                dev_err(dai->dev, "Only one simultaneous stream supported!\n");
@@ -468,6 +473,12 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
        struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+       bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+       bool has_capture = !hcp->hcd.no_i2s_capture;
+       bool has_playback = !hcp->hcd.no_i2s_playback;
+
+       if (!((has_playback && tx) || (has_capture && !tx)))
+               return;
 
        hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
        hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
index bf27bdd5be2067a867c0ea48bda09a1372ab6a94..473d3cd3955487d6380a56922be9d5fbe13ab4c1 100644 (file)
@@ -242,7 +242,7 @@ enum {
 
 struct tx_mute_work {
        struct tx_macro *tx;
-       u32 decimator;
+       u8 decimator;
        struct delayed_work dwork;
 };
 
@@ -635,7 +635,7 @@ exit:
        return 0;
 }
 
-static bool is_amic_enabled(struct snd_soc_component *component, int decimator)
+static bool is_amic_enabled(struct snd_soc_component *component, u8 decimator)
 {
        u16 adc_mux_reg, adc_reg, adc_n;
 
@@ -849,7 +849,7 @@ static int tx_macro_enable_dec(struct snd_soc_dapm_widget *w,
                               struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-       unsigned int decimator;
+       u8 decimator;
        u16 tx_vol_ctl_reg, dec_cfg_reg, hpf_gate_reg, tx_gain_ctl_reg;
        u8 hpf_cut_off_freq;
        int hpf_delay = TX_MACRO_DMIC_HPF_DELAY_MS;
@@ -1064,7 +1064,8 @@ static int tx_macro_hw_params(struct snd_pcm_substream *substream,
                              struct snd_soc_dai *dai)
 {
        struct snd_soc_component *component = dai->component;
-       u32 decimator, sample_rate;
+       u32 sample_rate;
+       u8 decimator;
        int tx_fs_rate;
        struct tx_macro *tx = snd_soc_component_get_drvdata(component);
 
@@ -1128,7 +1129,7 @@ static int tx_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
 {
        struct snd_soc_component *component = dai->component;
        struct tx_macro *tx = snd_soc_component_get_drvdata(component);
-       u16 decimator;
+       u8 decimator;
 
        /* active decimator not set yet */
        if (tx->active_decimator[dai->id] == -1)
index 614eceda6b9e3181b184beb16eb3c0c5612f787f..33b67db8794e367b5a0d4e7e5d603b8a1f48bb2a 100644 (file)
@@ -294,6 +294,10 @@ config SND_SOC_IMX_SGTL5000
          Say Y if you want to add support for SoC audio on an i.MX board with
          a sgtl5000 codec.
 
+         Note that this is an old driver. Consider enabling
+         SND_SOC_FSL_ASOC_CARD and SND_SOC_SGTL5000 to use the newer
+         driver.
+
 config SND_SOC_IMX_SPDIF
        tristate "SoC Audio support for i.MX boards with S/PDIF"
        select SND_SOC_IMX_PCM_DMA
index acd43b6108e9979a7b881a4c8a19c87bb43a76fc..1a1d572cc1d028626bf7a68e950883b0ddb67062 100644 (file)
@@ -117,6 +117,26 @@ static void avs_da7219_codec_exit(struct snd_soc_pcm_runtime *rtd)
        snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL);
 }
 
+static int
+avs_da7219_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params)
+{
+       struct snd_interval *rate, *channels;
+       struct snd_mask *fmt;
+
+       rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+       channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+       fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+       /* The ADSP will convert the FE rate to 48k, stereo */
+       rate->min = rate->max = 48000;
+       channels->min = channels->max = 2;
+
+       /* set SSP0 to 24 bit */
+       snd_mask_none(fmt);
+       snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+       return 0;
+}
+
 static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
                               struct snd_soc_dai_link **dai_link)
 {
@@ -148,6 +168,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
        dl->num_platforms = 1;
        dl->id = 0;
        dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+       dl->be_hw_params_fixup = avs_da7219_be_fixup;
        dl->init = avs_da7219_codec_init;
        dl->exit = avs_da7219_codec_exit;
        dl->nonatomic = 1;
index 921f42caf7e09d0547a69e8aa460ec5cf3181d9f..183123d08c5a3b4e39f693d7ab13f14c49b96434 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <sound/pcm_params.h>
 #include <sound/soc.h>
 #include <sound/soc-acpi.h>
 #include <sound/soc-dapm.h>
@@ -24,6 +25,26 @@ static const struct snd_soc_dapm_route card_base_routes[] = {
        { "Spk", NULL, "Speaker" },
 };
 
+static int
+avs_max98357a_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params)
+{
+       struct snd_interval *rate, *channels;
+       struct snd_mask *fmt;
+
+       rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+       channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+       fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+       /* The ADSP will convert the FE rate to 48k, stereo */
+       rate->min = rate->max = 48000;
+       channels->min = channels->max = 2;
+
+       /* set SSP0 to 16 bit */
+       snd_mask_none(fmt);
+       snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
+       return 0;
+}
+
 static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
                               struct snd_soc_dai_link **dai_link)
 {
@@ -55,6 +76,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
        dl->num_platforms = 1;
        dl->id = 0;
        dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+       dl->be_hw_params_fixup = avs_max98357a_be_fixup;
        dl->nonatomic = 1;
        dl->no_pcm = 1;
        dl->dpcm_playback = 1;
index b31fa931ba8b6a848406e391ee3d28ddcf4f960e..b69fc5567135d5330c7a4cc75c0bf2e037ea043d 100644 (file)
@@ -33,15 +33,15 @@ avs_nau8825_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *co
                return -EINVAL;
        }
 
-       if (!SND_SOC_DAPM_EVENT_ON(event)) {
+       if (SND_SOC_DAPM_EVENT_ON(event))
+               ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
+                                            SND_SOC_CLOCK_IN);
+       else
                ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
-               if (ret < 0) {
-                       dev_err(card->dev, "set sysclk err = %d\n", ret);
-                       return ret;
-               }
-       }
+       if (ret < 0)
+               dev_err(card->dev, "Set sysclk failed: %d\n", ret);
 
-       return 0;
+       return ret;
 }
 
 static const struct snd_kcontrol_new card_controls[] = {
index 473e9fe5d0bf746ae4c3446d4a9ab23f007bf838..b2c2ba93dcb5606b1d5febad49a1420614b44334 100644 (file)
@@ -169,6 +169,27 @@ static const struct snd_soc_ops avs_rt5682_ops = {
        .hw_params = avs_rt5682_hw_params,
 };
 
+static int
+avs_rt5682_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
+{
+       struct snd_interval *rate, *channels;
+       struct snd_mask *fmt;
+
+       rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+       channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+       fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+       /* The ADSP will convert the FE rate to 48k, stereo */
+       rate->min = rate->max = 48000;
+       channels->min = channels->max = 2;
+
+       /* set SSPN to 24 bit */
+       snd_mask_none(fmt);
+       snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+       return 0;
+}
+
 static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
                               struct snd_soc_dai_link **dai_link)
 {
@@ -201,6 +222,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
        dl->id = 0;
        dl->init = avs_rt5682_codec_init;
        dl->exit = avs_rt5682_codec_exit;
+       dl->be_hw_params_fixup = avs_rt5682_be_fixup;
        dl->ops = &avs_rt5682_ops;
        dl->nonatomic = 1;
        dl->no_pcm = 1;
index c5db6961276240c0d90d6a08c7189aa8bd153a18..2b7f5ad92aca7b885ba0686f829fe9262067f6da 100644 (file)
@@ -15,7 +15,6 @@
 #include <sound/soc-acpi.h>
 #include "../../../codecs/nau8825.h"
 
-#define SKL_NUVOTON_CODEC_DAI  "nau8825-hifi"
 #define SKL_SSM_CODEC_DAI      "ssm4567-hifi"
 
 static struct snd_soc_codec_conf card_codec_conf[] = {
@@ -34,41 +33,11 @@ static const struct snd_kcontrol_new card_controls[] = {
        SOC_DAPM_PIN_SWITCH("Right Speaker"),
 };
 
-static int
-platform_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event)
-{
-       struct snd_soc_dapm_context *dapm = w->dapm;
-       struct snd_soc_card *card = dapm->card;
-       struct snd_soc_dai *codec_dai;
-       int ret;
-
-       codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
-       if (!codec_dai) {
-               dev_err(card->dev, "Codec dai not found\n");
-               return -EINVAL;
-       }
-
-       if (SND_SOC_DAPM_EVENT_ON(event)) {
-               ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
-                                            SND_SOC_CLOCK_IN);
-               if (ret < 0)
-                       dev_err(card->dev, "set sysclk err = %d\n", ret);
-       } else {
-               ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
-               if (ret < 0)
-                       dev_err(card->dev, "set sysclk err = %d\n", ret);
-       }
-
-       return ret;
-}
-
 static const struct snd_soc_dapm_widget card_widgets[] = {
        SND_SOC_DAPM_SPK("Left Speaker", NULL),
        SND_SOC_DAPM_SPK("Right Speaker", NULL),
        SND_SOC_DAPM_SPK("DP1", NULL),
        SND_SOC_DAPM_SPK("DP2", NULL),
-       SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control,
-                           SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 };
 
 static const struct snd_soc_dapm_route card_base_routes[] = {
index 56ee5fef66a8be93b1eafdb210b698d9aa0c1ec8..28dd2046e4ac590375f3f39a9d660818e0fd3487 100644 (file)
@@ -559,7 +559,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        {
                .comp_ids = &essx_83x6,
                .drv_name = "sof-essx8336",
-               .sof_tplg_filename = "sof-adl-es83x6", /* the tplg suffix is added at run time */
+               .sof_tplg_filename = "sof-adl-es8336", /* the tplg suffix is added at run time */
                .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
                                        SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
                                        SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
index 3aa63aac4a68e28540b06e5af4ca4ed151407a2a..81554d20265897e319fc3cf34d9191960c2bbe51 100644 (file)
@@ -184,9 +184,9 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_
                          unsigned int freq)
 {
        if (freq)
-               return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+               return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
 
-       return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+       return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
 }
 EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock);
 
index 3aea36c077c9d2262e36b2eb1d9a7c23181c1712..f3bdeba2841221f8b205109db46d53ab04a112f1 100644 (file)
@@ -196,12 +196,15 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev)
                goto err;
        }
 
+       usleep_range(500, 1000);
+
        /* exit HDA controller reset */
        ret = hda_dsp_ctrl_link_reset(sdev, false);
        if (ret < 0) {
                dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
                goto err;
        }
+       usleep_range(1000, 1200);
 
        hda_codec_detect_mask(sdev);
 
index 68eb06f13a1fd2539bab66cf9c255d860e059caf..a6f2822401e03da1d3553f50fa4370955219f6e2 100644 (file)
@@ -392,6 +392,12 @@ static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
        snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
                            SOF_HDA_VS_D0I3C_I3, value);
 
+       /*
+        * The value written to the D0I3C::I3 bit may not be taken into account immediately.
+        * A delay is recommended before checking if D0I3C::CIP is cleared
+        */
+       usleep_range(30, 40);
+
        /* Wait for cmd in progress to be cleared before exiting the function */
        ret = hda_dsp_wait_d0i3c_done(sdev);
        if (ret < 0) {
@@ -400,6 +406,12 @@ static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
        }
 
        reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
+       /* Confirm d0i3 state changed with paranoia check */
+       if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
+               dev_err(sdev->dev, "failed to update D0I3C!\n");
+               return -EIO;
+       }
+
        trace_sof_intel_D0I3C_updated(sdev, reg);
 
        return 0;
index 69279dcc92dc136630fed5a7ab3860a9f5540091..aff6cb573c270f9a4cf387c400a6c85c94757031 100644 (file)
@@ -78,6 +78,7 @@ static const struct sof_dev_desc glk_desc = {
        .nocodec_tplg_filename = "sof-glk-nocodec.tplg",
        .ops = &sof_apl_ops,
        .ops_init = sof_apl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 /* PCI IDs */
index 8db3f8d15b55e9f6f708b91a9f11b5fe136ca774..4c0c1c369dcd8a8ffb1fec3d9de2360730e9f0f2 100644 (file)
@@ -48,6 +48,7 @@ static const struct sof_dev_desc cnl_desc = {
        .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
        .ops = &sof_cnl_ops,
        .ops_init = sof_cnl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static const struct sof_dev_desc cfl_desc = {
@@ -111,6 +112,7 @@ static const struct sof_dev_desc cml_desc = {
        .nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
        .ops = &sof_cnl_ops,
        .ops_init = sof_cnl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 /* PCI IDs */
index d6cf75e357dbf07c9b1e89894cb8fa530d3e26a6..6785669113b3c2ee74f4d0315b4f3053a61ad6e8 100644 (file)
@@ -79,6 +79,7 @@ static const struct sof_dev_desc jsl_desc = {
        .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
        .ops = &sof_cnl_ops,
        .ops_init = sof_cnl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 /* PCI IDs */
index 6e4e6d4ef5a5649e781bbb78e053bb5b3f9edbb1..b183dc0014b4b5a102b57c63e0a787bc76b55d01 100644 (file)
@@ -46,6 +46,7 @@ static const struct sof_dev_desc mtl_desc = {
        .nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
        .ops = &sof_mtl_ops,
        .ops_init = sof_mtl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 /* PCI IDs */
index 3a99dc444f92ea6b46014a4faaf9463fe4421c29..5b4bccf819658eeb356f2f58425c6ebb894f8436 100644 (file)
@@ -38,6 +38,7 @@ static struct sof_dev_desc skl_desc = {
        .nocodec_tplg_filename = "sof-skl-nocodec.tplg",
        .ops = &sof_skl_ops,
        .ops_init = sof_skl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static struct sof_dev_desc kbl_desc = {
@@ -61,6 +62,7 @@ static struct sof_dev_desc kbl_desc = {
        .nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
        .ops = &sof_skl_ops,
        .ops_init = sof_skl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 /* PCI IDs */
index e80c4dfef85a58d71d0db6246199fe55c8b64336..22e769e0831d9349331e646ed5fbbfab1debf68a 100644 (file)
@@ -48,6 +48,7 @@ static const struct sof_dev_desc tgl_desc = {
        .nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
        .ops = &sof_tgl_ops,
        .ops_init = sof_tgl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static const struct sof_dev_desc tglh_desc = {
@@ -110,6 +111,7 @@ static const struct sof_dev_desc ehl_desc = {
        .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
        .ops = &sof_tgl_ops,
        .ops_init = sof_tgl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static const struct sof_dev_desc adls_desc = {
@@ -141,6 +143,7 @@ static const struct sof_dev_desc adls_desc = {
        .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
        .ops = &sof_tgl_ops,
        .ops_init = sof_tgl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static const struct sof_dev_desc adl_desc = {
@@ -172,6 +175,7 @@ static const struct sof_dev_desc adl_desc = {
        .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
        .ops = &sof_tgl_ops,
        .ops_init = sof_tgl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static const struct sof_dev_desc adl_n_desc = {
@@ -203,6 +207,7 @@ static const struct sof_dev_desc adl_n_desc = {
        .nocodec_tplg_filename = "sof-adl-nocodec.tplg",
        .ops = &sof_tgl_ops,
        .ops_init = sof_tgl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static const struct sof_dev_desc rpls_desc = {
@@ -234,6 +239,7 @@ static const struct sof_dev_desc rpls_desc = {
        .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
        .ops = &sof_tgl_ops,
        .ops_init = sof_tgl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 static const struct sof_dev_desc rpl_desc = {
@@ -265,6 +271,7 @@ static const struct sof_dev_desc rpl_desc = {
        .nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
        .ops = &sof_tgl_ops,
        .ops_init = sof_tgl_ops_init,
+       .ops_free = hda_ops_free,
 };
 
 /* PCI IDs */
index 5b2b409752c585d103c6485e1db5efbdd0243b7d..8c22a00266c06a6ffd94be4e2e921450d721ce14 100644 (file)
@@ -75,11 +75,7 @@ static int tangier_pci_probe(struct snd_sof_dev *sdev)
 
        /* LPE base */
        base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
-       size = pci_resource_len(pci, desc->resindex_lpe_base);
-       if (size < PCI_BAR_SIZE) {
-               dev_err(sdev->dev, "error: I/O region is too small.\n");
-               return -ENODEV;
-       }
+       size = PCI_BAR_SIZE;
 
        dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
        sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
index dceb78bfe17c68fc47efbc5c9611828d8efbc439..b1f425b39db94fd54b95bd6e9f3a6bc25538cf58 100644 (file)
@@ -2081,7 +2081,9 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
                break;
        case SOF_DAI_INTEL_ALH:
                if (data) {
-                       config->dai_index = data->dai_index;
+                       /* save the dai_index during hw_params and reuse it for hw_free */
+                       if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+                               config->dai_index = data->dai_index;
                        config->alh.stream_id = data->dai_data;
                }
                break;
@@ -2089,7 +2091,30 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
                break;
        }
 
-       config->flags = flags;
+       /*
+        * The dai_config op is invoked several times and the flags argument varies as below:
+        * BE DAI hw_params: When the op is invoked during the BE DAI hw_params, flags contains
+        * SOF_DAI_CONFIG_FLAGS_HW_PARAMS along with quirks
+        * FE DAI hw_params: When invoked during FE DAI hw_params after the DAI widget has
+        * just been set up in the DSP, flags is set to SOF_DAI_CONFIG_FLAGS_HW_PARAMS with no
+        * quirks
+        * BE DAI trigger: When invoked during the BE DAI trigger, flags is set to
+        * SOF_DAI_CONFIG_FLAGS_PAUSE and contains no quirks
+        * BE DAI hw_free: When invoked during the BE DAI hw_free, flags is set to
+        * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks
+        * FE DAI hw_free: When invoked during the FE DAI hw_free, flags is set to
+        * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks
+        *
+        * The DAI_CONFIG IPC is sent to the DSP, only after the widget is set up during the FE
+        * DAI hw_params. But since the BE DAI hw_params precedes the FE DAI hw_params, the quirks
+        * need to be preserved when assigning the flags before sending the IPC.
+        * For the case of PAUSE/HW_FREE, since there are no quirks, flags can be used as is.
+        */
+
+       if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+               config->flags |= flags;
+       else
+               config->flags = flags;
 
        /* only send the IPC if the widget is set up in the DSP */
        if (swidget->use_count > 0) {
@@ -2097,6 +2122,9 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
                                         &reply, sizeof(reply));
                if (ret < 0)
                        dev_err(sdev->dev, "Failed to set dai config for %s\n", dai->name);
+
+               /* clear the flags once the IPC has been sent even if it fails */
+               config->flags = SOF_DAI_CONFIG_FLAGS_NONE;
        }
 
        return ret;
index 3de64ea2dc9aa533c042097e20488630976e0c12..4493bbd7faf12bbda0a2e9ed8d4f6c869f83b0c8 100644 (file)
@@ -970,8 +970,9 @@ static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev)
                return;
        }
 
-       if (hdr.size < sizeof(hdr)) {
-               dev_err(sdev->dev, "The received message size is invalid\n");
+       if (hdr.size < sizeof(hdr) || hdr.size > SOF_IPC_MSG_MAX_SIZE) {
+               dev_err(sdev->dev, "The received message size is invalid: %u\n",
+                       hdr.size);
                return;
        }
 
index 67bd2233fd9a675746ca91396ccddf25fb547f1c..9a71af1a613ac3800c9a5016de2a638cfac5d2d5 100644 (file)
@@ -97,7 +97,8 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
                }
 
                /* set curve type and duration from topology */
-               data.curve_duration = gain->data.curve_duration;
+               data.curve_duration_l = gain->data.curve_duration_l;
+               data.curve_duration_h = gain->data.curve_duration_h;
                data.curve_type = gain->data.curve_type;
 
                msg->data_ptr = &data;
index 3e27c7a48ebd39a3136063ee9d4daccf07047cb7..a623707c8ffc74a61a7176af8eb54fd0350f3aaa 100644 (file)
@@ -107,7 +107,7 @@ static const struct sof_topology_token gain_tokens[] = {
                get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
        {SOF_TKN_GAIN_RAMP_DURATION,
                SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
-               offsetof(struct sof_ipc4_gain_data, curve_duration)},
+               offsetof(struct sof_ipc4_gain_data, curve_duration_l)},
        {SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
                get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
 };
@@ -155,7 +155,7 @@ static void sof_ipc4_dbg_audio_format(struct device *dev,
        for (i = 0; i < num_format; i++, ptr = (u8 *)ptr + object_size) {
                fmt = ptr;
                dev_dbg(dev,
-                       " #%d: %uKHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
+                       " #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
                        i, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
                        fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg);
        }
@@ -692,7 +692,7 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
 
        dev_dbg(scomp->dev,
                "pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x, cpc %d\n",
-               swidget->widget->name, gain->data.curve_type, gain->data.curve_duration,
+               swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l,
                gain->data.init_val, gain->base_config.cpc);
 
        ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
@@ -980,6 +980,7 @@ static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget)
 
                ipc4_copier = dai->private;
                if (ipc4_copier->dai_type == SOF_DAI_INTEL_ALH) {
+                       struct sof_ipc4_copier_data *copier_data = &ipc4_copier->data;
                        struct sof_ipc4_alh_configuration_blob *blob;
                        unsigned int group_id;
 
@@ -989,6 +990,9 @@ static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget)
                                           ALH_MULTI_GTW_BASE;
                                ida_free(&alh_group_ida, group_id);
                        }
+
+                       /* clear the node ID */
+                       copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
                }
        }
 
@@ -1940,8 +1944,15 @@ static int sof_ipc4_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
                pipeline->skip_during_fe_trigger = true;
                fallthrough;
        case SOF_DAI_INTEL_ALH:
-               copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
-               copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data);
+               /*
+                * Do not clear the node ID when this op is invoked with
+                * SOF_DAI_CONFIG_FLAGS_HW_FREE. It is needed to free the group_ida during
+                * unprepare.
+                */
+               if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
+                       copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK;
+                       copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data);
+               }
                break;
        case SOF_DAI_INTEL_DMIC:
        case SOF_DAI_INTEL_SSP:
index 72529179ac22343063ca3ca1ce9873cd36ca75b3..123f1096f3261d46b0e1eda51376ce9b3c02e1d8 100644 (file)
@@ -46,7 +46,7 @@
 #define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4)
 
 /* Node ID for DMIC type DAI copiers */
-#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) (((x) & 0x7) << 5)
+#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) ((x) & 0x7)
 
 #define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff
 #define SOF_IPC4_VOL_ZERO_DB   0x7fffffff
@@ -277,14 +277,16 @@ struct sof_ipc4_control_data {
  * @init_val: Initial value
  * @curve_type: Curve type
  * @reserved: reserved for future use
- * @curve_duration: Curve duration
+ * @curve_duration_l: Curve duration low part
+ * @curve_duration_h: Curve duration high part
  */
 struct sof_ipc4_gain_data {
        uint32_t channels;
        uint32_t init_val;
        uint32_t curve_type;
        uint32_t reserved;
-       uint32_t curve_duration;
+       uint32_t curve_duration_l;
+       uint32_t curve_duration_h;
 } __aligned(8);
 
 /**
index 760621bfc80284a05c2ebed28579b29368a01f81..6de388a8d0b8df64e6653acc2df7e09510d50326 100644 (file)
@@ -50,9 +50,27 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
        /* reset route setup status for all routes that contain this widget */
        sof_reset_route_setup_status(sdev, swidget);
 
+       /* free DAI config and continue to free widget even if it fails */
+       if (WIDGET_IS_DAI(swidget->id)) {
+               struct snd_sof_dai_config_data data;
+               unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_FREE;
+
+               data.dai_data = DMA_CHAN_INVALID;
+
+               if (tplg_ops && tplg_ops->dai_config) {
+                       err = tplg_ops->dai_config(sdev, swidget, flags, &data);
+                       if (err < 0)
+                               dev_err(sdev->dev, "failed to free config for widget %s\n",
+                                       swidget->widget->name);
+               }
+       }
+
        /* continue to disable core even if IPC fails */
-       if (tplg_ops && tplg_ops->widget_free)
-               err = tplg_ops->widget_free(sdev, swidget);
+       if (tplg_ops && tplg_ops->widget_free) {
+               ret = tplg_ops->widget_free(sdev, swidget);
+               if (ret < 0 && !err)
+                       err = ret;
+       }
 
        /*
         * disable widget core. continue to route setup status and complete flag
@@ -151,8 +169,12 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
 
        /* send config for DAI components */
        if (WIDGET_IS_DAI(swidget->id)) {
-               unsigned int flags = SOF_DAI_CONFIG_FLAGS_NONE;
+               unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS;
 
+               /*
+                * The config flags saved during BE DAI hw_params will be used for IPC3. IPC4 does
+                * not use the flags argument.
+                */
                if (tplg_ops && tplg_ops->dai_config) {
                        ret = tplg_ops->dai_config(sdev, swidget, flags, NULL);
                        if (ret < 0)
@@ -588,8 +610,8 @@ int sof_widget_list_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm,
        ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
                                        dir, SOF_WIDGET_SETUP);
        if (ret < 0) {
-               ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
-                                               dir, SOF_WIDGET_UNPREPARE);
+               sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params,
+                                         dir, SOF_WIDGET_UNPREPARE);
                return ret;
        }
 
index 4a62ccc71fcbffa005145fa43f601db69f8ef7b4..9f3a038fe21add2ae6ebbe02a41a029f9bdf8eac 100644 (file)
@@ -1388,14 +1388,15 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
        if (ret < 0) {
                dev_err(scomp->dev, "failed to parse component pin tokens for %s\n",
                        w->name);
-               return ret;
+               goto widget_free;
        }
 
        if (swidget->num_sink_pins > SOF_WIDGET_MAX_NUM_PINS ||
            swidget->num_source_pins > SOF_WIDGET_MAX_NUM_PINS) {
                dev_err(scomp->dev, "invalid pins for %s: [sink: %d, src: %d]\n",
                        swidget->widget->name, swidget->num_sink_pins, swidget->num_source_pins);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto widget_free;
        }
 
        if (swidget->num_sink_pins > 1) {
@@ -1404,7 +1405,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
                if (ret < 0) {
                        dev_err(scomp->dev, "failed to parse sink pin binding for %s\n",
                                w->name);
-                       return ret;
+                       goto widget_free;
                }
        }
 
@@ -1414,7 +1415,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
                if (ret < 0) {
                        dev_err(scomp->dev, "failed to parse source pin binding for %s\n",
                                w->name);
-                       return ret;
+                       goto widget_free;
                }
        }
 
@@ -1436,9 +1437,8 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
        case snd_soc_dapm_dai_out:
                dai = kzalloc(sizeof(*dai), GFP_KERNEL);
                if (!dai) {
-                       kfree(swidget);
-                       return -ENOMEM;
-
+                       ret = -ENOMEM;
+                       goto widget_free;
                }
 
                ret = sof_widget_parse_tokens(scomp, swidget, tw, token_list, token_list_size);
@@ -1496,8 +1496,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
                        tw->shift, swidget->id, tw->name,
                        strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0
                                ? tw->sname : "none");
-               kfree(swidget);
-               return ret;
+               goto widget_free;
        }
 
        if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) {
@@ -1518,10 +1517,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
                        if (ret) {
                                dev_err(scomp->dev, "widget event binding failed for %s\n",
                                        swidget->widget->name);
-                               kfree(swidget->private);
-                               kfree(swidget->tuples);
-                               kfree(swidget);
-                               return ret;
+                               goto free;
                        }
                }
        }
@@ -1532,10 +1528,8 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
 
                spipe = kzalloc(sizeof(*spipe), GFP_KERNEL);
                if (!spipe) {
-                       kfree(swidget->private);
-                       kfree(swidget->tuples);
-                       kfree(swidget);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto free;
                }
 
                spipe->pipe_widget = swidget;
@@ -1546,6 +1540,12 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
        w->dobj.private = swidget;
        list_add(&swidget->list, &sdev->widget_list);
        return ret;
+free:
+       kfree(swidget->private);
+       kfree(swidget->tuples);
+widget_free:
+       kfree(swidget);
+       return ret;
 }
 
 static int sof_route_unload(struct snd_soc_component *scomp,
index f68e2e9eef8b27223f753399dab743f370318d69..a2c484c243f5d9ba5da21fd932d802db1e23054e 100755 (executable)
@@ -87,10 +87,14 @@ xfail grep -i "error" $OUTFILE
 
 echo "Max node number check"
 
-echo -n > $TEMPCONF
-for i in `seq 1 1024` ; do
-   echo "node$i" >> $TEMPCONF
-done
+awk '
+BEGIN {
+  for (i = 0; i < 26; i += 1)
+      printf("%c\n", 65 + i % 26)
+  for (i = 26; i < 8192; i += 1)
+      printf("%c%c%c\n", 65 + i % 26, 65 + (i / 26) % 26, 65 + (i / 26 / 26))
+}
+' > $TEMPCONF
 xpass $BOOTCONF -a $TEMPCONF $INITRD
 
 echo "badnode" >> $TEMPCONF
index 8c4e3e536c04285e155617cde126f2c4fce10e08..639524b59930bfcabcd0d352048a3fd4aa1da08b 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
 /* Do not edit directly, auto-generated from: */
 /*     Documentation/netlink/specs/netdev.yaml */
 /* YNL-GEN uapi header */
@@ -33,6 +33,8 @@ enum netdev_xdp_act {
        NETDEV_XDP_ACT_HW_OFFLOAD = 16,
        NETDEV_XDP_ACT_RX_SG = 32,
        NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+
+       NETDEV_XDP_ACT_MASK = 127,
 };
 
 enum {
index fbaf683353945d11b9c82a6577729c1e41eab93c..e4d05662a96ce38ba029f85960ff896046a22976 100644 (file)
@@ -20,8 +20,8 @@
 /* make sure libbpf doesn't use kernel-only integer typedefs */
 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
 
-/* prevent accidental re-addition of reallocarray()/strlcpy() */
-#pragma GCC poison reallocarray strlcpy
+/* prevent accidental re-addition of reallocarray() */
+#pragma GCC poison reallocarray
 
 #include "libbpf.h"
 #include "btf.h"
index a34d088f67432187c0d5b0e457788081d1ba5f95..d04450c2a44af866ef30cf6892a9d0d956c02b92 100644 (file)
@@ -138,10 +138,8 @@ class SpecEnumSet(SpecElement):
 
     def get_mask(self):
         mask = 0
-        idx = self.yaml.get('value-start', 0)
-        for _ in self.entries.values():
-            mask |= 1 << idx
-            idx += 1
+        for e in self.entries.values():
+            mask += e.user_value()
         return mask
 
 
@@ -276,6 +274,7 @@ class SpecFamily(SpecElement):
 
     Attributes:
         proto     protocol type (e.g. genetlink)
+        license   spec license (loaded from an SPDX tag on the spec)
 
         attr_sets  dict of attribute sets
         msgs       dict of all messages (index by name)
@@ -285,6 +284,13 @@ class SpecFamily(SpecElement):
     """
     def __init__(self, spec_path, schema_path=None):
         with open(spec_path, "r") as stream:
+            prefix = '# SPDX-License-Identifier: '
+            first = stream.readline().strip()
+            if not first.startswith(prefix):
+                raise Exception('SPDX license tag required in the spec')
+            self.license = first[len(prefix):]
+
+            stream.seek(0)
             spec = yaml.safe_load(stream)
 
         self._resolution_list = []
@@ -389,7 +395,8 @@ class SpecFamily(SpecElement):
     def resolve(self):
         self.resolve_up(super())
 
-        for elem in self.yaml['definitions']:
+        definitions = self.yaml.get('definitions', [])
+        for elem in definitions:
             if elem['type'] == 'enum' or elem['type'] == 'flags':
                 self.consts[elem['name']] = self.new_enum(elem)
             else:
index 90764a83c6461118d6dd9aeff19d16224bf87f9a..32536e1f9064bba576a267b5a02f6fe16a38d4a5 100644 (file)
@@ -200,7 +200,7 @@ def _genl_msg(nl_type, nl_flags, genl_cmd, genl_version, seq=None):
     if seq is None:
         seq = random.randint(1, 1024)
     nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
-    genlmsg = struct.pack("bbH", genl_cmd, genl_version, 0)
+    genlmsg = struct.pack("BBH", genl_cmd, genl_version, 0)
     return nlmsg + genlmsg
 
 
@@ -264,7 +264,7 @@ class GenlMsg:
         self.hdr = nl_msg.raw[0:4]
         self.raw = nl_msg.raw[4:]
 
-        self.genl_cmd, self.genl_version, _ = struct.unpack("bbH", self.hdr)
+        self.genl_cmd, self.genl_version, _ = struct.unpack("BBH", self.hdr)
 
         self.raw_attrs = NlAttrs(self.raw)
 
@@ -358,7 +358,7 @@ class YnlFamily(SpecFamily):
                 raw >>= 1
                 i += 1
         else:
-            value = enum['entries'][raw - i]
+            value = enum.entries_by_val[raw - i].name
         rsp[attr_spec['name']] = value
 
     def _decode(self, attrs, space):
index 1bcc5354d8000725b52eeea7d77c8710afcf08c6..c16671a02621bfbbe8018985d614c4766ea7fed0 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
 
 import argparse
 import collections
@@ -1931,9 +1931,14 @@ def render_uapi(family, cw):
 
             if const.get('render-max', False):
                 cw.nl()
-                max_name = c_upper(name_pfx + 'max')
-                cw.p('__' + max_name + ',')
-                cw.p(max_name + ' = (__' + max_name + ' - 1)')
+                if const['type'] == 'flags':
+                    max_name = c_upper(name_pfx + 'mask')
+                    max_val = f' = {enum.get_mask()},'
+                    cw.p(max_name + max_val)
+                else:
+                    max_name = c_upper(name_pfx + 'max')
+                    cw.p('__' + max_name + ',')
+                    cw.p(max_name + ' = (__' + max_name + ' - 1)')
             cw.block_end(line=';')
             cw.nl()
         elif const['type'] == 'const':
@@ -2054,6 +2059,10 @@ def main():
 
     try:
         parsed = Family(args.spec)
+        if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
+            print('Spec license:', parsed.license)
+            print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
+            os.sys.exit(1)
     except yaml.YAMLError as exc:
         print(exc)
         os.sys.exit(1)
@@ -2062,13 +2071,10 @@ def main():
     cw = CodeWriter(BaseNlLib(), out_file)
 
     _, spec_kernel = find_kernel_root(args.spec)
-    if args.mode == 'uapi':
-        cw.p('/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */')
+    if args.mode == 'uapi' or args.header:
+        cw.p(f'/* SPDX-License-Identifier: {parsed.license} */')
     else:
-        if args.header:
-            cw.p('/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */')
-        else:
-            cw.p('// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause')
+        cw.p(f'// SPDX-License-Identifier: {parsed.license}')
     cw.p("/* Do not edit directly, auto-generated from: */")
     cw.p(f"/*\t{spec_kernel} */")
     cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
index 52aa0351533c315e4c7bfd4e6a318642757c5e73..388c9e3ad0407b8b536dc69ffc6ea91d0ddd27f6 100644 (file)
@@ -97,7 +97,7 @@ static struct option long_options[] = {
 static void parse_options(int argc, char **argv)
 {
        int option_index = 0;
-       char *pathname;
+       char *pathname, *endptr;
        int opt;
 
        pathname = strdup(argv[0]);
@@ -125,11 +125,23 @@ static void parse_options(int argc, char **argv)
                        log_getinfo = 1;
                        break;
                case 'T':
-                       log_type = atoi(optarg);
+                       log_type = strtol(optarg, &endptr, 0);
+                       if (*endptr || (log_type != 0 && log_type != 1)) {
+                               printf("Number expected: type(0:execution, 1:history) - Quit.\n");
+                               exit(1);
+                       }
+
                        set_log_type = 1;
                        break;
                case 'L':
-                       log_level = atoi(optarg);
+                       log_level = strtol(optarg, &endptr, 0);
+                       if (*endptr ||
+                           (log_level != 0 && log_level != 1 &&
+                            log_level != 2 && log_level != 4)) {
+                               printf("Number expected: level(0, 1, 2, 4) - Quit.\n");
+                               exit(1);
+                       }
+
                        set_log_level = 1;
                        break;
                case 'R':
index 82c09cd25cc2189e405d4cbefed5665514fee50a..bf4ac24a1c7aa818dc3bb96d2e2e070613ebe03a 100755 (executable)
@@ -5556,9 +5556,8 @@ def executeSuspend(quiet=False):
                if not quiet:
                        pprint('CAPTURING TRACE')
                op = sv.writeDatafileHeader(sv.ftracefile, testdata)
-               fp = open(tp+'trace', 'r')
-               for line in fp:
-                       op.write(line)
+               fp = open(tp+'trace', 'rb')
+               op.write(ascii(fp.read()))
                op.close()
                sv.fsetVal('', 'trace')
                sv.platforminfo(cmdafter)
index c7b26a3603afecc8d7bc1f8d282a8857d979268e..8f08c3fd498d5b81185519728fc1c28a8a0d4d5f 100644 (file)
@@ -340,10 +340,12 @@ starts a new interval.
 must be run as root.
 Alternatively, non-root users can be enabled to run turbostat this way:
 
-# setcap cap_sys_admin,cap_sys_rawio,cap_sys_nice=+ep ./turbostat
+# setcap cap_sys_admin,cap_sys_rawio,cap_sys_nice=+ep path/to/turbostat
 
 # chmod +r /dev/cpu/*/msr
 
+# chmod +r /dev/cpu_dma_latency
+
 .B "turbostat "
 reads hardware counters, but doesn't write them.
 So it will not interfere with the OS or other programs, including
index aba460410dbd1b040942806edf676b01b163da8e..8a36ba5df9f90a0dc7c5ecd52b929de4d7b9eafb 100644 (file)
@@ -3,7 +3,7 @@
  * turbostat -- show CPU frequency and C-state residency
  * on modern Intel and AMD processors.
  *
- * Copyright (c) 2022 Intel Corporation.
+ * Copyright (c) 2023 Intel Corporation.
  * Len Brown <len.brown@intel.com>
  */
 
@@ -670,7 +670,7 @@ static int perf_instr_count_open(int cpu_num)
        /* counter for cpu_num, including user + kernel and all processes */
        fd = perf_event_open(&pea, -1, cpu_num, -1, 0);
        if (fd == -1) {
-               warn("cpu%d: perf instruction counter", cpu_num);
+               warnx("capget(CAP_PERFMON) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
                BIC_NOT_PRESENT(BIC_IPC);
        }
 
@@ -2538,7 +2538,7 @@ static void dump_turbo_ratio_limits(int trl_msr_offset, int family, int model)
 
        get_msr(base_cpu, trl_msr_offset, &msr);
        fprintf(outf, "cpu%d: MSR_%sTURBO_RATIO_LIMIT: 0x%08llx\n",
-               base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY" : "", msr);
+               base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY_" : "", msr);
 
        if (has_turbo_ratio_group_limits(family, model)) {
                get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts);
@@ -3502,9 +3502,6 @@ release_msr:
 /*
  * set_my_sched_priority(pri)
  * return previous
- *
- * if non-root, do this:
- * # /sbin/setcap cap_sys_rawio,cap_sys_nice=+ep /usr/bin/turbostat
  */
 int set_my_sched_priority(int priority)
 {
@@ -3518,7 +3515,7 @@ int set_my_sched_priority(int priority)
 
        retval = setpriority(PRIO_PROCESS, 0, priority);
        if (retval)
-               err(retval, "setpriority(%d)", priority);
+               errx(retval, "capget(CAP_SYS_NICE) failed,try \"# setcap cap_sys_nice=ep %s\"", progname);
 
        errno = 0;
        retval = getpriority(PRIO_PROCESS, 0);
@@ -4426,7 +4423,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 
        fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
                "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
-               cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x2) ? "" : "No-");
+               cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-");
 
        return 0;
 }
@@ -5463,6 +5460,9 @@ unsigned int intel_model_duplicates(unsigned int model)
 
        case INTEL_FAM6_ICELAKE_D:
                return INTEL_FAM6_ICELAKE_X;
+
+       case INTEL_FAM6_EMERALDRAPIDS_X:
+               return INTEL_FAM6_SAPPHIRERAPIDS_X;
        }
        return model;
 }
@@ -5476,13 +5476,13 @@ void print_dev_latency(void)
 
        fd = open(path, O_RDONLY);
        if (fd < 0) {
-               warn("fopen %s\n", path);
+               warnx("capget(CAP_SYS_ADMIN) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
                return;
        }
 
        retval = read(fd, (void *)&value, sizeof(int));
        if (retval != sizeof(int)) {
-               warn("read %s\n", path);
+               warn("read failed %s", path);
                close(fd);
                return;
        }
@@ -5543,7 +5543,7 @@ void process_cpuid()
        edx_flags = edx;
 
        if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch))
-               warnx("get_msr(UCODE)\n");
+               warnx("get_msr(UCODE)");
 
        /*
         * check max extended function levels of CPUID.
@@ -6225,7 +6225,7 @@ int get_and_dump_counters(void)
 
 void print_version()
 {
-       fprintf(outf, "turbostat version 2022.10.04 - Len Brown <lenb@kernel.org>\n");
+       fprintf(outf, "turbostat version 2023.03.17 - Len Brown <lenb@kernel.org>\n");
 }
 
 #define COMMAND_LINE_SIZE 2048
index 5fd1424db37d82c399b4bfda72652e54c38dfe4c..c382f579fe94a73120e25233ab9da267557bd07b 100644 (file)
@@ -4,10 +4,15 @@
 # No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
 all:
 
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+ARCH := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
 
-TEST_PROGS := run.sh
-TEST_FILES := basic.sh tbench.sh gitsource.sh
+ifeq (x86,$(ARCH))
+TEST_FILES += ../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
+TEST_FILES += ../../../power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+endif
+
+TEST_PROGS += run.sh
+TEST_FILES += basic.sh tbench.sh gitsource.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/bpf/prog_tests/uninit_stack.c b/tools/testing/selftests/bpf/prog_tests/uninit_stack.c
new file mode 100644 (file)
index 0000000..e64c719
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "uninit_stack.skel.h"
+
+void test_uninit_stack(void)
+{
+       RUN_TESTS(uninit_stack);
+}
index b3b326b8e2d1cb38cf374d18737529ce0fb7d9a4..6dab9cffda132b755c554fcddb67af752b22187a 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (c) 2021 Facebook */
 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
+#define vm_flags vm_start
 
 char _license[] SEC("license") = "GPL";
 
index 591104e79812ef2918c23f84b692790fe519f8fc..e96b901a733c5878d48de3dc1dcc3f4dd32f89af 100644 (file)
@@ -5,12 +5,10 @@
 #include <errno.h>
 #include <linux/capability.h>
 
-struct kernel_cap_struct {
-       __u64 val;
-} __attribute__((preserve_access_index));
+typedef struct { unsigned long long val; } kernel_cap_t;
 
 struct cred {
-       struct kernel_cap_struct cap_effective;
+       kernel_cap_t cap_effective;
 } __attribute__((preserve_access_index));
 
 char _license[] SEC("license") = "GPL";
@@ -18,8 +16,8 @@ char _license[] SEC("license") = "GPL";
 SEC("lsm.s/userns_create")
 int BPF_PROG(test_userns_create, const struct cred *cred, int ret)
 {
-       struct kernel_cap_struct caps = cred->cap_effective;
-       __u64 cap_mask = BIT_LL(CAP_SYS_ADMIN);
+       kernel_cap_t caps = cred->cap_effective;
+       __u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
 
        if (ret)
                return 0;
index 98327bdbbfd24700900ac94667bb7e0269d9a1fd..8fba3f3649e227d521c84caac140ccf9dc5812a8 100644 (file)
@@ -5,12 +5,12 @@
 #include "bpf_misc.h"
 
 struct Small {
-       int x;
+       long x;
 };
 
 struct Big {
-       int x;
-       int y;
+       long x;
+       long y;
 };
 
 __noinline int foo(const struct Big *big)
@@ -22,7 +22,7 @@ __noinline int foo(const struct Big *big)
 }
 
 SEC("cgroup_skb/ingress")
-__failure __msg("invalid indirect read from stack")
+__failure __msg("invalid indirect access to stack")
 int global_func10(struct __sk_buff *skb)
 {
        const struct Small small = {.x = skb->len };
diff --git a/tools/testing/selftests/bpf/progs/uninit_stack.c b/tools/testing/selftests/bpf/progs/uninit_stack.c
new file mode 100644 (file)
index 0000000..8a40347
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Read an uninitialized value from stack at a fixed offset */
+SEC("socket")
+__naked int read_uninit_stack_fixed_off(void *ctx)
+{
+       asm volatile ("                                 \
+               r0 = 0;                                 \
+               /* force stack depth to be 128 */       \
+               *(u64*)(r10 - 128) = r1;                \
+               r1 = *(u8 *)(r10 - 8 );                 \
+               r0 += r1;                               \
+               r1 = *(u8 *)(r10 - 11);                 \
+               r1 = *(u8 *)(r10 - 13);                 \
+               r1 = *(u8 *)(r10 - 15);                 \
+               r1 = *(u16*)(r10 - 16);                 \
+               r1 = *(u32*)(r10 - 32);                 \
+               r1 = *(u64*)(r10 - 64);                 \
+               /* read from a spill of a wrong size, it is a separate  \
+                * branch in check_stack_read_fixed_off()               \
+                */                                     \
+               *(u32*)(r10 - 72) = r1;                 \
+               r1 = *(u64*)(r10 - 72);                 \
+               r0 = 0;                                 \
+               exit;                                   \
+"
+                     ::: __clobber_all);
+}
+
+/* Read an uninitialized value from stack at a variable offset */
+SEC("socket")
+__naked int read_uninit_stack_var_off(void *ctx)
+{
+       asm volatile ("                                 \
+               call %[bpf_get_prandom_u32];            \
+               /* force stack depth to be 64 */        \
+               *(u64*)(r10 - 64) = r0;                 \
+               r0 = -r0;                               \
+               /* give r0 a range [-31, -1] */         \
+               if r0 s<= -32 goto exit_%=;             \
+               if r0 s>= 0 goto exit_%=;               \
+               /* access stack using r0 */             \
+               r1 = r10;                               \
+               r1 += r0;                               \
+               r2 = *(u8*)(r1 + 0);                    \
+exit_%=:       r0 = 0;                                 \
+               exit;                                   \
+"
+                     :
+                     : __imm(bpf_get_prandom_u32)
+                     : __clobber_all);
+}
+
+static __noinline void dummy(void) {}
+
+/* Pass a pointer to uninitialized stack memory to a helper.
+ * Passed memory block should be marked as STACK_MISC after helper call.
+ */
+SEC("socket")
+__log_level(7) __msg("fp-104=mmmmmmmm")
+__naked int helper_uninit_to_misc(void *ctx)
+{
+       asm volatile ("                                 \
+               /* force stack depth to be 128 */       \
+               *(u64*)(r10 - 128) = r1;                \
+               r1 = r10;                               \
+               r1 += -128;                             \
+               r2 = 32;                                \
+               call %[bpf_trace_printk];               \
+               /* Call to dummy() forces print_verifier_state(..., true),      \
+                * thus showing the stack state, matched by __msg().            \
+                */                                     \
+               call %[dummy];                          \
+               r0 = 0;                                 \
+               exit;                                   \
+"
+                     :
+                     : __imm(bpf_trace_printk),
+                       __imm(dummy)
+                     : __clobber_all);
+}
+
+char _license[] SEC("license") = "GPL";
index 9d993926bf0efb6a582077db5f751e496c98ef9e..289ed202ec66aec63d11e492a5ed60906e906500 100644 (file)
         * that fp-8 stack slot was unused in the fall-through
         * branch and will accept the program incorrectly
         */
-       BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+       BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+       BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_JMP_IMM(BPF_JA, 0, 0, 0),
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .fixup_map_hash_48b = { 6 },
-       .errstr = "invalid indirect read from stack R2 off -8+0 size 8",
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_XDP,
+       .fixup_map_hash_48b = { 7 },
+       .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
+       .result_unpriv = REJECT,
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
 {
        "calls: ctx read at start of subprog",
index a6c869a7319cd23a4ed5cae14e4aa0a80dad94c9..9c4885885aba0627b558136347267479e5436ea1 100644 (file)
 {
        "helper access to variable memory: stack, bitwise AND, zero included",
        .insns = {
-       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
-       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
-       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
-       BPF_MOV64_IMM(BPF_REG_3, 0),
-       BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
+       /* set max stack size */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
+       /* set r3 to a random value */
+       BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+       /* use bitwise AND to limit r3 range to [0, 64] */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+       BPF_MOV64_IMM(BPF_REG_4, 0),
+       /* Call bpf_ringbuf_output(), it is one of a few helper functions with
+        * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
+        * For unpriv this should signal an error, because memory at &fp[-64] is
+        * not initialized.
+        */
+       BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
        BPF_EXIT_INSN(),
        },
-       .errstr = "invalid indirect read from stack R1 off -64+0 size 64",
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .fixup_map_ringbuf = { 4 },
+       .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
+       .result_unpriv = REJECT,
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
 {
        "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
 {
        "helper access to variable memory: stack, JMP, no min check",
        .insns = {
-       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
-       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
-       BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
-       BPF_MOV64_IMM(BPF_REG_3, 0),
-       BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
+       /* set max stack size */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
+       /* set r3 to a random value */
+       BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+       /* use JMP to limit r3 range to [0, 64] */
+       BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+       BPF_MOV64_IMM(BPF_REG_4, 0),
+       /* Call bpf_ringbuf_output(), it is one of a few helper functions with
+        * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
+        * For unpriv this should signal an error, because memory at &fp[-64] is
+        * not initialized.
+        */
+       BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr = "invalid indirect read from stack R1 off -64+0 size 64",
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .fixup_map_ringbuf = { 4 },
+       .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
+       .result_unpriv = REJECT,
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
 {
        "helper access to variable memory: stack, JMP (signed), no min check",
 {
        "helper access to variable memory: 8 bytes leak",
        .insns = {
-       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
-       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+       /* set max stack size */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
+       /* set r3 to a random value */
+       BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+       /* Note: fp[-32] left uninitialized */
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
-       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
-       BPF_MOV64_IMM(BPF_REG_3, 0),
-       BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+       /* Limit r3 range to [1, 64] */
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
+       BPF_MOV64_IMM(BPF_REG_4, 0),
+       /* Call bpf_ringbuf_output(), it is one of a few helper functions with
+        * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
+        * For unpriv this should signal an error, because memory region [1, 64]
+        * at &fp[-64] is not fully initialized.
+        */
+       BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .errstr = "invalid indirect read from stack R1 off -64+32 size 64",
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .fixup_map_ringbuf = { 3 },
+       .errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
+       .result_unpriv = REJECT,
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
 {
        "helper access to variable memory: 8 bytes no leak (init memory)",
index 070893fb290074c0c68f1dae454993123eef5c02..02d9e004260b333944c29aed33e82c3d51e96a00 100644 (file)
                /* bpf_strtoul() */
                BPF_EMIT_CALL(BPF_FUNC_strtoul),
 
-               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
-       .errstr = "invalid indirect read from stack R4 off -16+4 size 8",
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
 {
        "ARG_PTR_TO_LONG misaligned",
index d63fd8991b03aa9e9f3f2ec1b3f03419f8a1686c..745d6b5842fd4e15e79820ea4c4cfd0659b14386 100644 (file)
                BPF_EXIT_INSN(),
        },
        .fixup_map_hash_8b = { 3 },
-       .errstr = "invalid read from stack off -16+0 size 8",
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .errstr_unpriv = "invalid read from stack off -16+0 size 8",
+       .result_unpriv = REJECT,
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
 {
        "precision tracking for u32 spill/fill",
        BPF_EXIT_INSN(),
        },
        .flags = BPF_F_TEST_STATE_FREQ,
-       .errstr = "invalid read from stack off -8+1 size 8",
-       .result = REJECT,
+       .errstr_unpriv = "invalid read from stack off -8+1 size 8",
+       .result_unpriv = REJECT,
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
index d11d0b28be41672d35074d25c17a375b71be1061..108dd3ee1edda0cd9c326b85ed7b72b09b32cbd2 100644 (file)
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
 },
-{
-       "sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
-       .insns = {
-       BPF_MOV64_IMM(BPF_REG_2, 0),
-       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
-       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
-       BPF_MOV64_IMM(BPF_REG_0, 0),
-       BPF_EXIT_INSN(),
-       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
-       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
-       BPF_MOV64_IMM(BPF_REG_0, 0),
-       BPF_EXIT_INSN(),
-       BPF_MOV64_IMM(BPF_REG_4, 1),
-       BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
-       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
-       BPF_LD_MAP_FD(BPF_REG_1, 0),
-       BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
-       BPF_MOV64_IMM(BPF_REG_0, 0),
-       BPF_EXIT_INSN(),
-       },
-       .fixup_sk_storage_map = { 14 },
-       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
-       .result = REJECT,
-       .errstr = "invalid indirect read from stack",
-},
 {
        "bpf_map_lookup_elem(smap, &key)",
        .insns = {
index 9bb302dade237f250c705ff10826e7d40b8ec42f..d1463bf4949afd10188f25938a66c49cb75a2129 100644 (file)
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
-       .result = REJECT,
-       .errstr = "invalid read from stack off -4+0 size 4",
-       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "invalid read from stack off -4+0 size 4",
+       /* in privileged mode reads from uninitialized stack locations are permitted */
+       .result = ACCEPT,
 },
 {
        "Spill a u32 const scalar.  Refill as u16.  Offset to skb->data",
index d37f512fad16e3b8fa2d1f46fd2974a9a6937217..b183e26c03f10c09a824b4fb51a432bf0ddc32df 100644 (file)
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_LWT_IN,
 },
-{
-       "indirect variable-offset stack access, max_off+size > max_initialized",
-       .insns = {
-       /* Fill only the second from top 8 bytes of the stack. */
-       BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
-       /* Get an unknown value. */
-       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
-       /* Make it small and 4-byte aligned. */
-       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
-       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
-       /* Add it to fp.  We now have either fp-12 or fp-16, but we don't know
-        * which. fp-12 size 8 is partially uninitialized stack.
-        */
-       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
-       /* Dereference it indirectly. */
-       BPF_LD_MAP_FD(BPF_REG_1, 0),
-       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
-       BPF_MOV64_IMM(BPF_REG_0, 0),
-       BPF_EXIT_INSN(),
-       },
-       .fixup_map_hash_8b = { 5 },
-       .errstr = "invalid indirect read from stack R2 var_off",
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_LWT_IN,
-},
 {
        "indirect variable-offset stack access, min_off < min_initialized",
        .insns = {
        .result = ACCEPT,
        .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 },
-{
-       "indirect variable-offset stack access, uninitialized",
-       .insns = {
-       BPF_MOV64_IMM(BPF_REG_2, 6),
-       BPF_MOV64_IMM(BPF_REG_3, 28),
-       /* Fill the top 16 bytes of the stack. */
-       BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
-       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-       /* Get an unknown value. */
-       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
-       /* Make it small and 4-byte aligned. */
-       BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
-       BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
-       /* Add it to fp.  We now have either fp-12 or fp-16, we don't know
-        * which, but either way it points to initialized stack.
-        */
-       BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
-       BPF_MOV64_IMM(BPF_REG_5, 8),
-       /* Dereference it indirectly. */
-       BPF_EMIT_CALL(BPF_FUNC_getsockopt),
-       BPF_MOV64_IMM(BPF_REG_0, 0),
-       BPF_EXIT_INSN(),
-       },
-       .errstr = "invalid indirect read from stack R4 var_off",
-       .result = REJECT,
-       .prog_type = BPF_PROG_TYPE_SOCK_OPS,
-},
 {
        "indirect variable-offset stack access, ok",
        .insns = {
index 8e3b786a748f97bf7d288a25d97a614cd41210ca..a39bb2560d9bfe88473d2434dacbf4f2ac9fce8f 100644 (file)
@@ -8,7 +8,8 @@ TEST_PROGS := \
        dev_addr_lists.sh \
        mode-1-recovery-updelay.sh \
        mode-2-recovery-updelay.sh \
-       option_prio.sh
+       option_prio.sh \
+       bond-eth-type-change.sh
 
 TEST_FILES := \
        lag_lib.sh \
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
new file mode 100755 (executable)
index 0000000..5cdd220
--- /dev/null
@@ -0,0 +1,85 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bond device ether type changing
+#
+
+ALL_TESTS="
+       bond_test_unsuccessful_enslave_type_change
+       bond_test_successful_enslave_type_change
+"
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/net_forwarding_lib.sh
+
+bond_check_flags()
+{
+       local bonddev=$1
+
+       ip -d l sh dev "$bonddev" | grep -q "MASTER"
+       check_err $? "MASTER flag is missing from the bond device"
+
+       ip -d l sh dev "$bonddev" | grep -q "SLAVE"
+       check_err $? "SLAVE flag is missing from the bond device"
+}
+
+# test enslaved bond dev type change from ARPHRD_ETHER and back
+# this allows us to test both MASTER and SLAVE flags at once
+bond_test_enslave_type_change()
+{
+       local test_success=$1
+       local devbond0="test-bond0"
+       local devbond1="test-bond1"
+       local devbond2="test-bond2"
+       local nonethdev="test-noneth0"
+
+       # create a non-ARPHRD_ETHER device for testing (e.g. nlmon type)
+       ip link add name "$nonethdev" type nlmon
+       check_err $? "could not create a non-ARPHRD_ETHER device (nlmon)"
+       ip link add name "$devbond0" type bond
+       if [ $test_success -eq 1 ]; then
+               # we need devbond0 in active-backup mode to successfully enslave nonethdev
+               ip link set dev "$devbond0" type bond mode active-backup
+               check_err $? "could not change bond mode to active-backup"
+       fi
+       ip link add name "$devbond1" type bond
+       ip link add name "$devbond2" type bond
+       ip link set dev "$devbond0" master "$devbond1"
+       check_err $? "could not enslave $devbond0 to $devbond1"
+       # change bond type to non-ARPHRD_ETHER
+       ip link set dev "$nonethdev" master "$devbond0" 1>/dev/null 2>/dev/null
+       ip link set dev "$nonethdev" nomaster 1>/dev/null 2>/dev/null
+       # restore ARPHRD_ETHER type by enslaving such device
+       ip link set dev "$devbond2" master "$devbond0"
+       check_err $? "could not enslave $devbond2 to $devbond0"
+       ip link set dev "$devbond1" nomaster
+
+       bond_check_flags "$devbond0"
+
+       # clean up
+       ip link del dev "$devbond0"
+       ip link del dev "$devbond1"
+       ip link del dev "$devbond2"
+       ip link del dev "$nonethdev"
+}
+
+bond_test_unsuccessful_enslave_type_change()
+{
+       RET=0
+
+       bond_test_enslave_type_change 0
+       log_test "Change ether type of an enslaved bond device with unsuccessful enslave"
+}
+
+bond_test_successful_enslave_type_change()
+{
+       RET=0
+
+       bond_test_enslave_type_change 1
+       log_test "Change ether type of an enslaved bond device with successful enslave"
+}
+
+tests_run
+
+exit "$EXIT_STATUS"
index 84a627c43795669a7839b93d4dc99c6773e7eeac..d66a0642cffd8acf1a1c1b5cf33f3d1ff1a62b8b 100644 (file)
@@ -141,6 +141,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
 TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
 TEST_GEN_PROGS_aarch64 += aarch64/psci_test
+TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
 TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
index 26556a266021e62844e51f96e6a8bd58ef9c699d..8ef370924a02e5b7377093fc289fe2b9def039ae 100644 (file)
@@ -47,6 +47,7 @@ struct test_args {
        int nr_iter;
        int timer_period_ms;
        int migration_freq_ms;
+       struct kvm_arm_counter_offset offset;
 };
 
 static struct test_args test_args = {
@@ -54,6 +55,7 @@ static struct test_args test_args = {
        .nr_iter = NR_TEST_ITERS_DEF,
        .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
        .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
+       .offset = { .reserved = 1 },
 };
 
 #define msecs_to_usecs(msec)           ((msec) * 1000LL)
@@ -121,25 +123,35 @@ static void guest_validate_irq(unsigned int intid,
        uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
        unsigned long xctl = 0;
        unsigned int timer_irq = 0;
+       unsigned int accessor;
 
-       if (stage == GUEST_STAGE_VTIMER_CVAL ||
-               stage == GUEST_STAGE_VTIMER_TVAL) {
-               xctl = timer_get_ctl(VIRTUAL);
-               timer_set_ctl(VIRTUAL, CTL_IMASK);
-               xcnt = timer_get_cntct(VIRTUAL);
-               cval = timer_get_cval(VIRTUAL);
+       if (intid == IAR_SPURIOUS)
+               return;
+
+       switch (stage) {
+       case GUEST_STAGE_VTIMER_CVAL:
+       case GUEST_STAGE_VTIMER_TVAL:
+               accessor = VIRTUAL;
                timer_irq = vtimer_irq;
-       } else if (stage == GUEST_STAGE_PTIMER_CVAL ||
-               stage == GUEST_STAGE_PTIMER_TVAL) {
-               xctl = timer_get_ctl(PHYSICAL);
-               timer_set_ctl(PHYSICAL, CTL_IMASK);
-               xcnt = timer_get_cntct(PHYSICAL);
-               cval = timer_get_cval(PHYSICAL);
+               break;
+       case GUEST_STAGE_PTIMER_CVAL:
+       case GUEST_STAGE_PTIMER_TVAL:
+               accessor = PHYSICAL;
                timer_irq = ptimer_irq;
-       } else {
+               break;
+       default:
                GUEST_ASSERT(0);
+               return;
        }
 
+       xctl = timer_get_ctl(accessor);
+       if ((xctl & CTL_IMASK) || !(xctl & CTL_ENABLE))
+               return;
+
+       timer_set_ctl(accessor, CTL_IMASK);
+       xcnt = timer_get_cntct(accessor);
+       cval = timer_get_cval(accessor);
+
        xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
 
        /* Make sure we are dealing with the correct timer IRQ */
@@ -148,6 +160,8 @@ static void guest_validate_irq(unsigned int intid,
        /* Basic 'timer condition met' check */
        GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us);
        GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl);
+
+       WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
 }
 
 static void guest_irq_handler(struct ex_regs *regs)
@@ -158,8 +172,6 @@ static void guest_irq_handler(struct ex_regs *regs)
 
        guest_validate_irq(intid, shared_data);
 
-       WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
-
        gic_set_eoi(intid);
 }
 
@@ -372,6 +384,13 @@ static struct kvm_vm *test_vm_create(void)
        vm_init_descriptor_tables(vm);
        vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
 
+       if (!test_args.offset.reserved) {
+               if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
+                       vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
+               else
+                       TEST_FAIL("no support for global offset\n");
+       }
+
        for (i = 0; i < nr_vcpus; i++)
                vcpu_init_descriptor_tables(vcpus[i]);
 
@@ -403,6 +422,7 @@ static void test_print_help(char *name)
                TIMER_TEST_PERIOD_MS_DEF);
        pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
                TIMER_TEST_MIGRATION_FREQ_MS);
+       pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n");
        pr_info("\t-h: print this help screen\n");
 }
 
@@ -410,7 +430,7 @@ static bool parse_args(int argc, char *argv[])
 {
        int opt;
 
-       while ((opt = getopt(argc, argv, "hn:i:p:m:")) != -1) {
+       while ((opt = getopt(argc, argv, "hn:i:p:m:o:")) != -1) {
                switch (opt) {
                case 'n':
                        test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
@@ -429,6 +449,10 @@ static bool parse_args(int argc, char *argv[])
                case 'm':
                        test_args.migration_freq_ms = atoi_non_negative("Frequency", optarg);
                        break;
+               case 'o':
+                       test_args.offset.counter_offset = strtol(optarg, NULL, 0);
+                       test_args.offset.reserved = 0;
+                       break;
                case 'h':
                default:
                        goto err;
index d287dd2cac0a5e22d622d810f3af579b8f6fe673..d4e1f4af29d68b886a1e638f44b88b40596c74fd 100644 (file)
@@ -651,7 +651,7 @@ int main(int ac, char **av)
  * The current blessed list was primed with the output of kernel version
  * v4.15 with --core-reg-fixup and then later updated with new registers.
  *
- * The blessed list is up to date with kernel version v5.13-rc3
+ * The blessed list is up to date with kernel version v6.4 (or so we hope)
  */
 static __u64 base_regs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
@@ -807,10 +807,10 @@ static __u64 base_regs[] = {
        ARM64_SYS_REG(3, 0, 0, 3, 7),
        ARM64_SYS_REG(3, 0, 0, 4, 0),   /* ID_AA64PFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 1),   /* ID_AA64PFR1_EL1 */
-       ARM64_SYS_REG(3, 0, 0, 4, 2),
+       ARM64_SYS_REG(3, 0, 0, 4, 2),   /* ID_AA64PFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 3),
        ARM64_SYS_REG(3, 0, 0, 4, 4),   /* ID_AA64ZFR0_EL1 */
-       ARM64_SYS_REG(3, 0, 0, 4, 5),
+       ARM64_SYS_REG(3, 0, 0, 4, 5),   /* ID_AA64SMFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 6),
        ARM64_SYS_REG(3, 0, 0, 4, 7),
        ARM64_SYS_REG(3, 0, 0, 5, 0),   /* ID_AA64DFR0_EL1 */
@@ -823,7 +823,7 @@ static __u64 base_regs[] = {
        ARM64_SYS_REG(3, 0, 0, 5, 7),
        ARM64_SYS_REG(3, 0, 0, 6, 0),   /* ID_AA64ISAR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 6, 1),   /* ID_AA64ISAR1_EL1 */
-       ARM64_SYS_REG(3, 0, 0, 6, 2),
+       ARM64_SYS_REG(3, 0, 0, 6, 2),   /* ID_AA64ISAR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 6, 3),
        ARM64_SYS_REG(3, 0, 0, 6, 4),
        ARM64_SYS_REG(3, 0, 0, 6, 5),
@@ -832,8 +832,8 @@ static __u64 base_regs[] = {
        ARM64_SYS_REG(3, 0, 0, 7, 0),   /* ID_AA64MMFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 1),   /* ID_AA64MMFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 2),   /* ID_AA64MMFR2_EL1 */
-       ARM64_SYS_REG(3, 0, 0, 7, 3),
-       ARM64_SYS_REG(3, 0, 0, 7, 4),
+       ARM64_SYS_REG(3, 0, 0, 7, 3),   /* ID_AA64MMFR3_EL1 */
+       ARM64_SYS_REG(3, 0, 0, 7, 4),   /* ID_AA64MMFR4_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 5),
        ARM64_SYS_REG(3, 0, 0, 7, 6),
        ARM64_SYS_REG(3, 0, 0, 7, 7),
@@ -858,6 +858,9 @@ static __u64 base_regs[] = {
        ARM64_SYS_REG(3, 2, 0, 0, 0),   /* CSSELR_EL1 */
        ARM64_SYS_REG(3, 3, 13, 0, 2),  /* TPIDR_EL0 */
        ARM64_SYS_REG(3, 3, 13, 0, 3),  /* TPIDRRO_EL0 */
+       ARM64_SYS_REG(3, 3, 14, 0, 1),  /* CNTPCT_EL0 */
+       ARM64_SYS_REG(3, 3, 14, 2, 1),  /* CNTP_CTL_EL0 */
+       ARM64_SYS_REG(3, 3, 14, 2, 2),  /* CNTP_CVAL_EL0 */
        ARM64_SYS_REG(3, 4, 3, 0, 0),   /* DACR32_EL2 */
        ARM64_SYS_REG(3, 4, 5, 0, 1),   /* IFSR32_EL2 */
        ARM64_SYS_REG(3, 4, 5, 3, 0),   /* FPEXC32_EL2 */
diff --git a/tools/testing/selftests/kvm/aarch64/smccc_filter.c b/tools/testing/selftests/kvm/aarch64/smccc_filter.c
new file mode 100644 (file)
index 0000000..f4ceae9
--- /dev/null
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * smccc_filter - Tests for the SMCCC filter UAPI.
+ *
+ * Copyright (c) 2023 Google LLC
+ *
+ * This test includes:
+ *  - Tests that the UAPI constraints are upheld by KVM. For example, userspace
+ *    is prevented from filtering the architecture range of SMCCC calls.
+ *  - Test that the filter actions (DENIED, FWD_TO_USER) work as intended.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/psci.h>
+#include <stdint.h>
+
+#include "processor.h"
+#include "test_util.h"
+
+enum smccc_conduit {
+       HVC_INSN,
+       SMC_INSN,
+};
+
+#define for_each_conduit(conduit)                                      \
+       for (conduit = HVC_INSN; conduit <= SMC_INSN; conduit++)
+
+static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
+{
+       struct arm_smccc_res res;
+
+       if (conduit == SMC_INSN)
+               smccc_smc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+       else
+               smccc_hvc(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+
+       GUEST_SYNC(res.a0);
+}
+
/*
 * Install an SMCCC filter covering [start, start + nr_functions) with the
 * given action via the KVM_ARM_VM_SMCCC_FILTER device attribute.
 *
 * Returns the raw result of the attribute-set call (callers inspect errno
 * on failure), leaving error-handling policy to the caller.
 */
static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
			      enum kvm_smccc_filter_action action)
{
	struct kvm_smccc_filter filter = {
		.base		= start,
		.nr_functions	= nr_functions,
		.action		= action,
	};

	return __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
				     KVM_ARM_VM_SMCCC_FILTER, &filter);
}
+
+static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+                            enum kvm_smccc_filter_action action)
+{
+       int ret = __set_smccc_filter(vm, start, nr_functions, action);
+
+       TEST_ASSERT(!ret, "failed to configure SMCCC filter: %d", ret);
+}
+
/*
 * Create a one-vCPU VM running guest_main, with in-kernel PSCI emulation
 * enabled. Returns the VM and stores the vCPU in @vcpu.
 */
static struct kvm_vm *setup_vm(struct kvm_vcpu **vcpu)
{
	struct kvm_vcpu_init init;
	struct kvm_vm *vm;

	vm = vm_create(1);
	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);

	/*
	 * Enable in-kernel emulation of PSCI to ensure that calls are denied
	 * due to the SMCCC filter, not because of KVM.
	 */
	init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);

	*vcpu = aarch64_vcpu_add(vm, 0, &init, guest_main);
	return vm;
}
+
+static void test_pad_must_be_zero(void)
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm = setup_vm(&vcpu);
+       struct kvm_smccc_filter filter = {
+               .base           = PSCI_0_2_FN_PSCI_VERSION,
+               .nr_functions   = 1,
+               .action         = KVM_SMCCC_FILTER_DENY,
+               .pad            = { -1 },
+       };
+       int r;
+
+       r = __kvm_device_attr_set(vm->fd, KVM_ARM_VM_SMCCC_CTRL,
+                                 KVM_ARM_VM_SMCCC_FILTER, &filter);
+       TEST_ASSERT(r < 0 && errno == EINVAL,
+                   "Setting filter with nonzero padding should return EINVAL");
+}
+
/* Ensure that userspace cannot filter the Arm Architecture SMCCC range */
static void test_filter_reserved_range(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = setup_vm(&vcpu);
	uint32_t smc64_fn;
	int r;

	/* SMC32 view of the architecture range */
	r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1,
			       1, KVM_SMCCC_FILTER_DENY);
	TEST_ASSERT(r < 0 && errno == EEXIST,
		    "Attempt to filter reserved range should return EEXIST");

	/* SMC64 view of the architecture range (function 0) */
	smc64_fn = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
				      0, 0);

	r = __set_smccc_filter(vm, smc64_fn, 1, KVM_SMCCC_FILTER_DENY);
	TEST_ASSERT(r < 0 && errno == EEXIST,
		    "Attempt to filter reserved range should return EEXIST");

	kvm_vm_free(vm);
}
+
/* A filter must cover at least one function: nr_functions == 0 is EINVAL. */
static void test_invalid_nr_functions(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = setup_vm(&vcpu);
	int r;

	r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 0, KVM_SMCCC_FILTER_DENY);
	TEST_ASSERT(r < 0 && errno == EINVAL,
		    "Attempt to filter 0 functions should return EINVAL");

	kvm_vm_free(vm);
}
+
/*
 * base + nr_functions must not wrap the 32-bit function-ID space;
 * (~0, ~0) overflows and must be rejected with EINVAL.
 */
static void test_overflow_nr_functions(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = setup_vm(&vcpu);
	int r;

	r = __set_smccc_filter(vm, ~0, ~0, KVM_SMCCC_FILTER_DENY);
	TEST_ASSERT(r < 0 && errno == EINVAL,
		    "Attempt to overflow filter range should return EINVAL");

	kvm_vm_free(vm);
}
+
/* Only defined filter actions are accepted; -1 is reserved and EINVALs. */
static void test_reserved_action(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = setup_vm(&vcpu);
	int r;

	r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, -1);
	TEST_ASSERT(r < 0 && errno == EINVAL,
		    "Attempt to use reserved filter action should return EINVAL");

	kvm_vm_free(vm);
}
+
+
/* Test that overlapping configurations of the SMCCC filter are rejected */
static void test_filter_overlap(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = setup_vm(&vcpu);
	int r;

	/* First installation of the range succeeds... */
	set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);

	/* ...re-filtering the same range must fail with EEXIST. */
	r = __set_smccc_filter(vm, PSCI_0_2_FN64_CPU_ON, 1, KVM_SMCCC_FILTER_DENY);
	TEST_ASSERT(r < 0 && errno == EEXIST,
		    "Attempt to filter already configured range should return EEXIST");

	kvm_vm_free(vm);
}
+
+static void expect_call_denied(struct kvm_vcpu *vcpu)
+{
+       struct ucall uc;
+
+       if (get_ucall(vcpu, &uc) != UCALL_SYNC)
+               TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
+
+       TEST_ASSERT(uc.args[1] == SMCCC_RET_NOT_SUPPORTED,
+                   "Unexpected SMCCC return code: %lu", uc.args[1]);
+}
+
/* Denied SMCCC calls have a return code of SMCCC_RET_NOT_SUPPORTED */
static void test_filter_denied(void)
{
	enum smccc_conduit conduit;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Run the scenario once per conduit (HVC and SMC). */
	for_each_conduit(conduit) {
		vm = setup_vm(&vcpu);

		set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_DENY);
		vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);

		vcpu_run(vcpu);
		expect_call_denied(vcpu);

		kvm_vm_free(vm);
	}
}
+
/*
 * A forwarded call must exit to userspace with KVM_EXIT_HYPERCALL,
 * reporting the function ID and, for the SMC conduit only, the
 * KVM_HYPERCALL_EXIT_SMC flag.
 */
static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id,
				    enum smccc_conduit conduit)
{
	struct kvm_run *run = vcpu->run;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERCALL,
		    "Unexpected exit reason: %u", run->exit_reason);
	TEST_ASSERT(run->hypercall.nr == func_id,
		    "Unexpected SMCCC function: %llu", run->hypercall.nr);

	/* The SMC flag must be set iff the guest used the smc conduit. */
	if (conduit == SMC_INSN)
		TEST_ASSERT(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC,
			    "KVM_HYPERCALL_EXIT_SMC is not set");
	else
		TEST_ASSERT(!(run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC),
			    "KVM_HYPERCALL_EXIT_SMC is set");
}
+
/* SMCCC calls forwarded to userspace cause KVM_EXIT_HYPERCALL exits */
static void test_filter_fwd_to_user(void)
{
	enum smccc_conduit conduit;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Run the scenario once per conduit (HVC and SMC). */
	for_each_conduit(conduit) {
		vm = setup_vm(&vcpu);

		set_smccc_filter(vm, PSCI_0_2_FN_PSCI_VERSION, 1, KVM_SMCCC_FILTER_FWD_TO_USER);
		vcpu_args_set(vcpu, 2, PSCI_0_2_FN_PSCI_VERSION, conduit);

		vcpu_run(vcpu);
		expect_call_fwd_to_user(vcpu, PSCI_0_2_FN_PSCI_VERSION, conduit);

		kvm_vm_free(vm);
	}
}
+
+static bool kvm_supports_smccc_filter(void)
+{
+       struct kvm_vm *vm = vm_create_barebones();
+       int r;
+
+       r = __kvm_has_device_attr(vm->fd, KVM_ARM_VM_SMCCC_CTRL, KVM_ARM_VM_SMCCC_FILTER);
+
+       kvm_vm_free(vm);
+       return !r;
+}
+
int main(void)
{
	/* The SMCCC filter UAPI is not present on older kernels. */
	TEST_REQUIRE(kvm_supports_smccc_filter());

	/* UAPI constraint checks first, then the filter actions themselves. */
	test_pad_must_be_zero();
	test_invalid_nr_functions();
	test_overflow_nr_functions();
	test_reserved_action();
	test_filter_reserved_range();
	test_filter_overlap();
	test_filter_denied();
	test_filter_fwd_to_user();
}
index d011b38e259eafbb9781f915b24de49ab0803c30..8835fed09e9f0a1388d32d93a2d51d568d698711 100644 (file)
@@ -2,3 +2,4 @@ CONFIG_KVM=y
 CONFIG_KVM_INTEL=y
 CONFIG_KVM_AMD=y
 CONFIG_USERFAULTFD=y
+CONFIG_IDLE_PAGE_TRACKING=y
index 5f977528e09c0019f150f3b0c58ef45c503c44b1..cb537253a6b9c8e7d115ec60a67cb6bdd4f854b4 100644 (file)
@@ -214,6 +214,19 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
               uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
               uint64_t arg6, struct arm_smccc_res *res);
 
+/**
+ * smccc_smc - Invoke a SMCCC function using the smc conduit
+ * @function_id: the SMCCC function to be called
+ * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
+ * @res: pointer to write the return values from registers x0-x3
+ *
+ */
+void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
+              uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
+              uint64_t arg6, struct arm_smccc_res *res);
+
+
+
 uint32_t guest_get_vcpuid(void);
 
 #endif /* SELFTEST_KVM_PROCESSOR_H */
index 5972a23b276542da19b370cbcbdb5ea6b1debe63..3a0259e25335334fd3e3cbb74d9ac5b4505cd69d 100644 (file)
@@ -58,10 +58,27 @@ static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
        return (gva >> vm->page_shift) & mask;
 }
 
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
 {
-       uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
-       return entry & mask;
+       uint64_t pte;
+
+       pte = pa & GENMASK(47, vm->page_shift);
+       if (vm->page_shift == 16)
+               pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
+       pte |= attrs;
+
+       return pte;
+}
+
+static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
+{
+       uint64_t pa;
+
+       pa = pte & GENMASK(47, vm->page_shift);
+       if (vm->page_shift == 16)
+               pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
+
+       return pa;
 }
 
 static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
@@ -110,18 +127,18 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 
        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
        if (!*ptep)
-               *ptep = vm_alloc_page_table(vm) | 3;
+               *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
 
        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
                if (!*ptep)
-                       *ptep = vm_alloc_page_table(vm) | 3;
+                       *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
                if (!*ptep)
-                       *ptep = vm_alloc_page_table(vm) | 3;
+                       *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
@@ -130,8 +147,7 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                TEST_FAIL("Page table levels must be 2, 3, or 4");
        }
 
-       *ptep = paddr | 3;
-       *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
+       *ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3);  /* AF */
 }
 
 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -226,7 +242,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
 {
        struct kvm_vcpu_init default_init = { .target = -1, };
        struct kvm_vm *vm = vcpu->vm;
-       uint64_t sctlr_el1, tcr_el1;
+       uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
 
        if (!init)
                init = &default_init;
@@ -277,10 +293,13 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
                TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
        }
 
+       ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);
+
        /* Configure output size */
        switch (vm->mode) {
        case VM_MODE_P52V48_64K:
                tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+               ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
                break;
        case VM_MODE_P48V48_4K:
        case VM_MODE_P48V48_16K:
@@ -310,7 +329,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
-       vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
+       vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
 }
 
@@ -508,29 +527,43 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
        close(kvm_fd);
 }
 
+#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5,    \
+                    arg6, res)                                                 \
+       asm volatile("mov   w0, %w[function_id]\n"                              \
+                    "mov   x1, %[arg0]\n"                                      \
+                    "mov   x2, %[arg1]\n"                                      \
+                    "mov   x3, %[arg2]\n"                                      \
+                    "mov   x4, %[arg3]\n"                                      \
+                    "mov   x5, %[arg4]\n"                                      \
+                    "mov   x6, %[arg5]\n"                                      \
+                    "mov   x7, %[arg6]\n"                                      \
+                    #insn  "#0\n"                                              \
+                    "mov   %[res0], x0\n"                                      \
+                    "mov   %[res1], x1\n"                                      \
+                    "mov   %[res2], x2\n"                                      \
+                    "mov   %[res3], x3\n"                                      \
+                    : [res0] "=r"(res->a0), [res1] "=r"(res->a1),              \
+                      [res2] "=r"(res->a2), [res3] "=r"(res->a3)               \
+                    : [function_id] "r"(function_id), [arg0] "r"(arg0),        \
+                      [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),    \
+                      [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)     \
+                    : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")
+
+
 void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
               uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
               uint64_t arg6, struct arm_smccc_res *res)
 {
-       asm volatile("mov   w0, %w[function_id]\n"
-                    "mov   x1, %[arg0]\n"
-                    "mov   x2, %[arg1]\n"
-                    "mov   x3, %[arg2]\n"
-                    "mov   x4, %[arg3]\n"
-                    "mov   x5, %[arg4]\n"
-                    "mov   x6, %[arg5]\n"
-                    "mov   x7, %[arg6]\n"
-                    "hvc   #0\n"
-                    "mov   %[res0], x0\n"
-                    "mov   %[res1], x1\n"
-                    "mov   %[res2], x2\n"
-                    "mov   %[res3], x3\n"
-                    : [res0] "=r"(res->a0), [res1] "=r"(res->a1),
-                      [res2] "=r"(res->a2), [res3] "=r"(res->a3)
-                    : [function_id] "r"(function_id), [arg0] "r"(arg0),
-                      [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),
-                      [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)
-                    : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
+       __smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
+                    arg6, res);
+}
+
+void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
+              uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
+              uint64_t arg6, struct arm_smccc_res *res)
+{
+       __smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
+                    arg6, res);
 }
 
 void kvm_selftest_arch_init(void)
index f7900e75d2306d2ff79d930e78a1c9110c69b48b..05400462c77996a71a439dfd7946904cbff06aa0 100644 (file)
@@ -10,12 +10,14 @@ endif
 CLANG_TARGET_FLAGS_arm          := arm-linux-gnueabi
 CLANG_TARGET_FLAGS_arm64        := aarch64-linux-gnu
 CLANG_TARGET_FLAGS_hexagon      := hexagon-linux-musl
+CLANG_TARGET_FLAGS_i386         := i386-linux-gnu
 CLANG_TARGET_FLAGS_m68k         := m68k-linux-gnu
 CLANG_TARGET_FLAGS_mips         := mipsel-linux-gnu
 CLANG_TARGET_FLAGS_powerpc      := powerpc64le-linux-gnu
 CLANG_TARGET_FLAGS_riscv        := riscv64-linux-gnu
 CLANG_TARGET_FLAGS_s390         := s390x-linux-gnu
 CLANG_TARGET_FLAGS_x86          := x86_64-linux-gnu
+CLANG_TARGET_FLAGS_x86_64       := x86_64-linux-gnu
 CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
 
 ifeq ($(CROSS_COMPILE),)
index f466a099f1bf75c5d39f810a165fdcf9bc104706..bc91bef5d254e5770d29188aae81f1259829b511 100644 (file)
@@ -163,9 +163,8 @@ TEST_F(mdwe, mprotect_WRITE_EXEC)
 
 TEST_F(mdwe, mmap_FIXED)
 {
-       void *p, *p2;
+       void *p;
 
-       p2 = mmap(NULL, self->size, PROT_READ | PROT_EXEC, self->flags, 0, 0);
        self->p = mmap(NULL, self->size, PROT_READ, self->flags, 0, 0);
        ASSERT_NE(self->p, MAP_FAILED);
 
index a6911cae368c77b49f0083def57bbfdac75463b5..80f06aa620345815171f8f708043ddbbc1beabcb 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 bind_bhash
 bind_timewait
+bind_wildcard
 csum
 cmsg_sender
 diag_uid
index 6cd8993454d7e0b647e5db3fb80526b18de0ce6f..80fbfe0330f6ec726b60b71761ca64df6e9d5b2f 100644 (file)
@@ -80,6 +80,7 @@ TEST_GEN_FILES += sctp_hello
 TEST_GEN_FILES += csum
 TEST_GEN_FILES += nat6to4.o
 TEST_GEN_FILES += ip_local_port_range
+TEST_GEN_FILES += bind_wildcard
 
 TEST_FILES := settings
 
diff --git a/tools/testing/selftests/net/bind_wildcard.c b/tools/testing/selftests/net/bind_wildcard.c
new file mode 100644 (file)
index 0000000..58edfc1
--- /dev/null
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "../kselftest_harness.h"
+
+FIXTURE(bind_wildcard)
+{
+       struct sockaddr_in addr4;
+       struct sockaddr_in6 addr6;
+       int expected_errno;
+};
+
+FIXTURE_VARIANT(bind_wildcard)
+{
+       const __u32 addr4_const;
+       const struct in6_addr *addr6_const;
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
+{
+       .addr4_const = INADDR_ANY,
+       .addr6_const = &in6addr_any,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
+{
+       .addr4_const = INADDR_ANY,
+       .addr6_const = &in6addr_loopback,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
+{
+       .addr4_const = INADDR_LOOPBACK,
+       .addr6_const = &in6addr_any,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
+{
+       .addr4_const = INADDR_LOOPBACK,
+       .addr6_const = &in6addr_loopback,
+};
+
/*
 * Build the v4/v6 addresses from the variant (port 0 = ephemeral) and
 * derive the expected result of the second bind(): the setup expects
 * EADDRINUSE only when the IPv6 address is the wildcard (in6addr_any).
 */
FIXTURE_SETUP(bind_wildcard)
{
	self->addr4.sin_family = AF_INET;
	self->addr4.sin_port = htons(0);
	self->addr4.sin_addr.s_addr = htonl(variant->addr4_const);

	self->addr6.sin6_family = AF_INET6;
	self->addr6.sin6_port = htons(0);
	self->addr6.sin6_addr = *variant->addr6_const;

	if (variant->addr6_const == &in6addr_any)
		self->expected_errno = EADDRINUSE;
	else
		self->expected_errno = 0;
}
+
+FIXTURE_TEARDOWN(bind_wildcard)
+{
+}
+
/*
 * Bind addr1 to an ephemeral port, read the chosen port back with
 * getsockname(), then try to bind addr2 to the same port. The second
 * bind() must fail with the fixture's expected_errno, or succeed when
 * no conflict is expected.
 */
void bind_sockets(struct __test_metadata *_metadata,
		  FIXTURE_DATA(bind_wildcard) *self,
		  struct sockaddr *addr1, socklen_t addrlen1,
		  struct sockaddr *addr2, socklen_t addrlen2)
{
	int fd[2];
	int ret;

	fd[0] = socket(addr1->sa_family, SOCK_STREAM, 0);
	ASSERT_GT(fd[0], 0);

	ret = bind(fd[0], addr1, addrlen1);
	ASSERT_EQ(ret, 0);

	/* Learn which ephemeral port the kernel picked for the first bind. */
	ret = getsockname(fd[0], addr1, &addrlen1);
	ASSERT_EQ(ret, 0);

	/* sin_port and sin6_port sit at the same offset in both families. */
	((struct sockaddr_in *)addr2)->sin_port = ((struct sockaddr_in *)addr1)->sin_port;

	fd[1] = socket(addr2->sa_family, SOCK_STREAM, 0);
	ASSERT_GT(fd[1], 0);

	ret = bind(fd[1], addr2, addrlen2);
	if (self->expected_errno) {
		ASSERT_EQ(ret, -1);
		ASSERT_EQ(errno, self->expected_errno);
	} else {
		ASSERT_EQ(ret, 0);
	}

	close(fd[1]);
	close(fd[0]);
}
+
+TEST_F(bind_wildcard, v4_v6)
+{
+       bind_sockets(_metadata, self,
+                    (struct sockaddr *)&self->addr4, sizeof(self->addr6),
+                    (struct sockaddr *)&self->addr6, sizeof(self->addr6));
+}
+
/* Bind the IPv6 socket first, then the IPv4 one on the same port. */
TEST_F(bind_wildcard, v6_v4)
{
	bind_sockets(_metadata, self,
		     (struct sockaddr *)&self->addr6, sizeof(self->addr6),
		     (struct sockaddr *)&self->addr4, sizeof(self->addr4));
}
+
+TEST_HARNESS_MAIN
index 2b5d6ff8737388f60b9b8cef771368e8dd49d81b..2d84c7a0be6b21e7f3fbbc5472e88d1f28220a75 100755 (executable)
@@ -59,6 +59,8 @@ class devlink_ports(object):
         assert stderr == ""
         ports = json.loads(stdout)['port']
 
+        validate_devlink_output(ports, 'flavour')
+
         for port in ports:
             if dev in port:
                 if ports[port]['flavour'] == 'physical':
@@ -220,6 +222,27 @@ def split_splittable_port(port, k, lanes, dev):
     unsplit(port.bus_info)
 
 
def validate_devlink_output(devlink_data, target_property=None):
    """
    Skip the test unless the devlink JSON output is usable.

    Checks that:
      1. devlink_data contains at least one non-empty entry
      2. target_property (if given) exists in at least one entry

    Prints the reason and exits with KSFT_SKIP when either check fails;
    returns None (without exiting) when the output is valid.
    """
    skip_reason = None
    if any(devlink_data.values()):
        if target_property:
            skip_reason = "{} not found in devlink output, test skipped".format(target_property)
            for key in devlink_data:
                if target_property in devlink_data[key]:
                    skip_reason = None
                    break  # one matching entry is enough
    else:
        skip_reason = 'devlink output is empty, test skipped'

    if skip_reason:
        print(skip_reason)
        sys.exit(KSFT_SKIP)
+
+
 def make_parser():
     parser = argparse.ArgumentParser(description='A test for port splitting.')
     parser.add_argument('--dev',
@@ -240,12 +263,9 @@ def main(cmdline=None):
         stdout, stderr = run_command(cmd)
         assert stderr == ""
 
+        validate_devlink_output(json.loads(stdout))
         devs = json.loads(stdout)['dev']
-        if devs:
-            dev = list(devs.keys())[0]
-        else:
-            print("no devlink device was found, test skipped")
-            sys.exit(KSFT_SKIP)
+        dev = list(devs.keys())[0]
 
     cmd = "devlink dev show %s" % dev
     stdout, stderr = run_command(cmd)
@@ -255,6 +275,7 @@ def main(cmdline=None):
 
     ports = devlink_ports(dev)
 
+    found_max_lanes = False
     for port in ports.if_names:
         max_lanes = get_max_lanes(port.name)
 
@@ -277,6 +298,11 @@ def main(cmdline=None):
                 split_splittable_port(port, lane, max_lanes, dev)
 
                 lane //= 2
+        found_max_lanes = True
+
+    if not found_max_lanes:
+        print(f"Test not started, no port of device {dev} reports max_lanes")
+        sys.exit(KSFT_SKIP)
 
 
 if __name__ == "__main__":
index 66c5be25c13d03892d878b4f58154f657daa54c9..48e52f995a98c84c7c94c0c20cf87ff66fac2c1f 100755 (executable)
@@ -240,7 +240,7 @@ check_expected_one()
        fi
 
        stdbuf -o0 -e0 printf "\tExpected value for '%s': '%s', got '%s'.\n" \
-               "${var}" "${!var}" "${!exp}"
+               "${var}" "${!exp}" "${!var}"
        return 1
 }
 
index 625e42901237c0c47db7bb129ff9921e3633bfde..d884fd69dd510bce52c684f496eb68b9d5ba866b 100644 (file)
 #include <sys/auxv.h>
 #include <sys/mman.h>
 #include <sys/shm.h>
+#include <sys/ptrace.h>
 #include <sys/syscall.h>
 #include <sys/wait.h>
+#include <sys/uio.h>
 
 #include "../kselftest.h" /* For __cpuid_count() */
 
@@ -583,6 +585,13 @@ static void test_dynamic_state(void)
        _exit(0);
 }
 
+static inline int __compare_tiledata_state(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
+{
+       return memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
+                     &xbuf2->bytes[xtiledata.xbuf_offset],
+                     xtiledata.size);
+}
+
 /*
  * Save current register state and compare it to @xbuf1.'
  *
@@ -599,9 +608,7 @@ static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
                fatal_error("failed to allocate XSAVE buffer\n");
 
        xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
-       ret = memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
-                    &xbuf2->bytes[xtiledata.xbuf_offset],
-                    xtiledata.size);
+       ret = __compare_tiledata_state(xbuf1, xbuf2);
 
        free(xbuf2);
 
@@ -826,6 +833,99 @@ static void test_context_switch(void)
        free(finfo);
 }
 
+/* Ptrace test */
+
+/*
+ * Make sure the ptracee has the expanded kernel buffer on the first
+ * use. Then, initialize the state before performing the state
+ * injection from the ptracer.
+ */
+static inline void ptracee_firstuse_tiledata(void)
+{
+       load_rand_tiledata(stashed_xsave);
+       init_xtiledata();
+}
+
+/*
+ * Ptracer injects the randomized tile data state. It also reads
+ * before and after that, which will execute the kernel's state copy
+ * functions. So, the tester is advised to double-check any emitted
+ * kernel messages.
+ */
+static void ptracer_inject_tiledata(pid_t target)
+{
+       struct xsave_buffer *xbuf;
+       struct iovec iov;
+
+       xbuf = alloc_xbuf();
+       if (!xbuf)
+               fatal_error("unable to allocate XSAVE buffer");
+
+       printf("\tRead the init'ed tiledata via ptrace().\n");
+
+       iov.iov_base = xbuf;
+       iov.iov_len = xbuf_size;
+
+       memset(stashed_xsave, 0, xbuf_size);
+
+       if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+               fatal_error("PTRACE_GETREGSET");
+
+       if (!__compare_tiledata_state(stashed_xsave, xbuf))
+               printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
+       else
+               printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");
+
+       printf("\tInject tiledata via ptrace().\n");
+
+       load_rand_tiledata(xbuf);
+
+       memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
+              &xbuf->bytes[xtiledata.xbuf_offset],
+              xtiledata.size);
+
+       if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+               fatal_error("PTRACE_SETREGSET");
+
+       if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+               fatal_error("PTRACE_GETREGSET");
+
+       if (!__compare_tiledata_state(stashed_xsave, xbuf))
+               printf("[OK]\tTiledata was correctly written to ptracee.\n");
+       else
+               printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
+}
+
+static void test_ptrace(void)
+{
+       pid_t child;
+       int status;
+
+       child = fork();
+       if (child < 0) {
+               err(1, "fork");
+       } else if (!child) {
+               if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
+                       err(1, "PTRACE_TRACEME");
+
+               ptracee_firstuse_tiledata();
+
+               raise(SIGTRAP);
+               _exit(0);
+       }
+
+       do {
+               wait(&status);
+       } while (WSTOPSIG(status) != SIGTRAP);
+
+       ptracer_inject_tiledata(child);
+
+       ptrace(PTRACE_DETACH, child, NULL, NULL);
+       wait(&status);
+       if (!WIFEXITED(status) || WEXITSTATUS(status))
+               err(1, "ptrace test");
+}
+
 int main(void)
 {
        /* Check hardware availability at first */
@@ -846,6 +946,8 @@ int main(void)
        ctxtswtest_config.num_threads = 5;
        test_context_switch();
 
+       test_ptrace();
+
        clearhandler(SIGILL);
        free_stashed_xsave();
 
index 67e9f9df3a8c4eae6421fe23b8bcef89319140ea..3de10dbb50f52ec60899f0ac89ed60f464743946 100644 (file)
@@ -860,6 +860,114 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
        close(fd);
 }
 
+#define INV_BUF_TEST_DATA_LEN 512
+
+static void test_inv_buf_client(const struct test_opts *opts, bool stream)
+{
+       unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
+       ssize_t ret;
+       int fd;
+
+       if (stream)
+               fd = vsock_stream_connect(opts->peer_cid, 1234);
+       else
+               fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+
+       if (fd < 0) {
+               perror("connect");
+               exit(EXIT_FAILURE);
+       }
+
+       control_expectln("SENDDONE");
+
+       /* Use invalid buffer here. */
+       ret = recv(fd, NULL, sizeof(data), 0);
+       if (ret != -1) {
+               fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
+               exit(EXIT_FAILURE);
+       }
+
+       if (errno != ENOMEM) {
+               fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
+               exit(EXIT_FAILURE);
+       }
+
+       ret = recv(fd, data, sizeof(data), MSG_DONTWAIT);
+
+       if (stream) {
+               /* For SOCK_STREAM we must continue reading. */
+               if (ret != sizeof(data)) {
+                       fprintf(stderr, "expected recv(2) success, got %zi\n", ret);
+                       exit(EXIT_FAILURE);
+               }
+               /* Don't check errno in case of success. */
+       } else {
+               /* For SOCK_SEQPACKET socket's queue must be empty. */
+               if (ret != -1) {
+                       fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
+                       exit(EXIT_FAILURE);
+               }
+
+               if (errno != EAGAIN) {
+                       fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
+                       exit(EXIT_FAILURE);
+               }
+       }
+
+       control_writeln("DONE");
+
+       close(fd);
+}
+
+static void test_inv_buf_server(const struct test_opts *opts, bool stream)
+{
+       unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
+       ssize_t res;
+       int fd;
+
+       if (stream)
+               fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+       else
+               fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+
+       if (fd < 0) {
+               perror("accept");
+               exit(EXIT_FAILURE);
+       }
+
+       res = send(fd, data, sizeof(data), 0);
+       if (res != sizeof(data)) {
+               fprintf(stderr, "unexpected send(2) result %zi\n", res);
+               exit(EXIT_FAILURE);
+       }
+
+       control_writeln("SENDDONE");
+
+       control_expectln("DONE");
+
+       close(fd);
+}
+
+static void test_stream_inv_buf_client(const struct test_opts *opts)
+{
+       test_inv_buf_client(opts, true);
+}
+
+static void test_stream_inv_buf_server(const struct test_opts *opts)
+{
+       test_inv_buf_server(opts, true);
+}
+
+static void test_seqpacket_inv_buf_client(const struct test_opts *opts)
+{
+       test_inv_buf_client(opts, false);
+}
+
+static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
+{
+       test_inv_buf_server(opts, false);
+}
+
 static struct test_case test_cases[] = {
        {
                .name = "SOCK_STREAM connection reset",
@@ -920,6 +1028,16 @@ static struct test_case test_cases[] = {
                .run_client = test_seqpacket_bigmsg_client,
                .run_server = test_seqpacket_bigmsg_server,
        },
+       {
+               .name = "SOCK_STREAM test invalid buffer",
+               .run_client = test_stream_inv_buf_client,
+               .run_server = test_stream_inv_buf_server,
+       },
+       {
+               .name = "SOCK_SEQPACKET test invalid buffer",
+               .run_client = test_seqpacket_inv_buf_client,
+               .run_server = test_seqpacket_inv_buf_server,
+       },
        {},
 };
 
index 075588c4da0815227ccb95609d477477d99203a4..9934d48d9a55772f4c49ad513f8f8c651370d736 100644 (file)
@@ -2,3 +2,4 @@
 *.d
 virtio_test
 vringh_test
+virtio-trace/trace-agent
index f40b72eb0e7bfd699c8c9f1c8ee7adc6b6a3937d..d1abb331ea682eb1f726e0587a81e519ea9ee131 100644 (file)
@@ -1298,7 +1298,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
         * At this point, pending calls to invalidate_range_start()
         * have completed but no more MMU notifiers will run, so
         * mn_active_invalidate_count may remain unbalanced.
-        * No threads can be waiting in install_new_memslots as the
+        * No threads can be waiting in kvm_swap_active_memslots() as the
         * last reference on KVM has been dropped, but freeing
         * memslots would deadlock without this manual intervention.
         */
@@ -1742,13 +1742,13 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
        kvm_arch_flush_shadow_memslot(kvm, old);
        kvm_arch_guest_memory_reclaimed(kvm);
 
-       /* Was released by kvm_swap_active_memslots, reacquire. */
+       /* Was released by kvm_swap_active_memslots(), reacquire. */
        mutex_lock(&kvm->slots_arch_lock);
 
        /*
         * Copy the arch-specific field of the newly-installed slot back to the
         * old slot as the arch data could have changed between releasing
-        * slots_arch_lock in install_new_memslots() and re-acquiring the lock
+        * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
         * above.  Writers are required to retrieve memslots *after* acquiring
         * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
         */
@@ -1810,11 +1810,11 @@ static int kvm_set_memslot(struct kvm *kvm,
        int r;
 
        /*
-        * Released in kvm_swap_active_memslots.
+        * Released in kvm_swap_active_memslots().
         *
-        * Must be held from before the current memslots are copied until
-        * after the new memslots are installed with rcu_assign_pointer,
-        * then released before the synchronize srcu in kvm_swap_active_memslots.
+        * Must be held from before the current memslots are copied until after
+        * the new memslots are installed with rcu_assign_pointer, then
+        * released before the synchronize srcu in kvm_swap_active_memslots().
         *
         * When modifying memslots outside of the slots_lock, must be held
         * before reading the pointer to the current memslots until after all
@@ -3866,7 +3866,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
 static int vcpu_get_pid(void *data, u64 *val)
 {
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+       struct kvm_vcpu *vcpu = data;
        *val = pid_nr(rcu_access_pointer(vcpu->pid));
        return 0;
 }
@@ -5572,8 +5572,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
                           const char *fmt)
 {
        int ret;
-       struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
-                                         inode->i_private;
+       struct kvm_stat_data *stat_data = inode->i_private;
 
        /*
         * The debugfs files are a reference to the kvm struct which
@@ -5594,8 +5593,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
 
 static int kvm_debugfs_release(struct inode *inode, struct file *file)
 {
-       struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
-                                         inode->i_private;
+       struct kvm_stat_data *stat_data = inode->i_private;
 
        simple_attr_release(inode, file);
        kvm_put_kvm(stat_data->kvm);
@@ -5644,7 +5642,7 @@ static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
 static int kvm_stat_data_get(void *data, u64 *val)
 {
        int r = -EFAULT;
-       struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+       struct kvm_stat_data *stat_data = data;
 
        switch (stat_data->kind) {
        case KVM_STAT_VM:
@@ -5663,7 +5661,7 @@ static int kvm_stat_data_get(void *data, u64 *val)
 static int kvm_stat_data_clear(void *data, u64 val)
 {
        int r = -EFAULT;
-       struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+       struct kvm_stat_data *stat_data = data;
 
        if (val)
                return -EINVAL;